diff --git a/js/src/irregexp/NativeRegExpMacroAssembler.cpp b/js/src/irregexp/NativeRegExpMacroAssembler.cpp index 5980bd1069d4..ebdab349157c 100644 --- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp +++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp @@ -455,7 +455,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext *cx) JS_ASSERT(!v.label); v.patchOffset.fixup(&masm); uintptr_t offset = masm.actualOffset(v.labelOffset); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset), ImmPtr(code->raw() + offset), ImmPtr(0)); } diff --git a/js/src/jit/AsmJS.cpp b/js/src/jit/AsmJS.cpp index 1cb0669d9f46..02b250f06400 100644 --- a/js/src/jit/AsmJS.cpp +++ b/js/src/jit/AsmJS.cpp @@ -6752,7 +6752,7 @@ GenerateInterruptExit(ModuleCompiler &m, Label *throwLabel) // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it // during jump delay slot. - JS_ASSERT(Imm16::isInSignedRange(AsmJSModule::heapGlobalDataOffset())); + JS_ASSERT(Imm16::IsInSignedRange(AsmJSModule::heapGlobalDataOffset())); masm.pop(HeapReg); masm.as_jr(HeapReg); masm.loadPtr(Address(GlobalReg, AsmJSModule::heapGlobalDataOffset()), HeapReg); diff --git a/js/src/jit/AsmJSModule.cpp b/js/src/jit/AsmJSModule.cpp index d1ecfbf14692..4f3034828681 100644 --- a/js/src/jit/AsmJSModule.cpp +++ b/js/src/jit/AsmJSModule.cpp @@ -374,7 +374,7 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl if (!staticLinkData_.relativeLinks.append(link)) return false; - labelOffset = Assembler::extractCodeLabelOffset(code_ + patchAtOffset); + labelOffset = Assembler::ExtractCodeLabelOffset(code_ + patchAtOffset); } } @@ -399,7 +399,7 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl RelativeLink link(RelativeLink::InstructionImmediate); link.patchAtOffset = masm.longJump(i); InstImm *inst = (InstImm *)(code_ + masm.longJump(i)); - link.targetOffset = Assembler::extractLuiOriValue(inst, inst->next()) - (uint32_t)code_; + link.targetOffset = Assembler::ExtractLuiOriValue(inst, inst->next()) - (uint32_t)code_; if (!staticLinkData_.relativeLinks.append(link)) return false; } @@ -597,12 +597,12 @@ AsmJSModule::staticallyLink(ExclusiveContext *cx) if (link.isRawPointerPatch()) *(uint8_t **)(patchAt) = target; else - Assembler::patchInstructionImmediate(patchAt, PatchedImmPtr(target)); + Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target)); } for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) { AbsoluteLink link = staticLinkData_.absoluteLinks[i]; - Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()), PatchedImmPtr(AddressOf(link.target, cx)), PatchedImmPtr((void*)-1)); } @@ -642,7 +642,7 @@ AsmJSModule::initHeap(Handle heap, JSContext *cx) #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS) uint32_t heapLength = heap->byteLength(); for (unsigned i = 0; i < heapAccesses_.length(); i++) { - jit::Assembler::updateBoundsCheck(heapLength, + jit::Assembler::UpdateBoundsCheck(heapLength, (jit::Instruction*)(heapAccesses_[i].offset() + code_)); } #endif @@ -652,11 +652,11 @@ void AsmJSModule::restoreToInitialState(ArrayBufferObject *maybePrevBuffer, ExclusiveContext *cx) { #ifdef DEBUG - // Put the absolute links back to -1 so patchDataWithValueCheck assertions + // Put the absolute links back to -1 so PatchDataWithValueCheck assertions 
// in staticallyLink are valid. for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) { AbsoluteLink link = staticLinkData_.absoluteLinks[i]; - Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()), PatchedImmPtr((void*)-1), PatchedImmPtr(AddressOf(link.target, cx))); } diff --git a/js/src/jit/BaselineCompiler.cpp b/js/src/jit/BaselineCompiler.cpp index ff1f8c4205b6..ab54274b8649 100644 --- a/js/src/jit/BaselineCompiler.cpp +++ b/js/src/jit/BaselineCompiler.cpp @@ -218,7 +218,7 @@ BaselineCompiler::compile() label.fixup(&masm); size_t icEntry = icLoadLabels_[i].icEntry; ICEntry *entryAddr = &(baselineScript->icEntry(icEntry)); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, label), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label), ImmPtr(entryAddr), ImmPtr((void*)-1)); } diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp index a3b393df757a..c2693d731334 100644 --- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -6723,7 +6723,7 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints) ionScript->setHasUncompiledCallTarget(); invalidateEpilogueData_.fixup(&masm); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript), ImmPtr((void*)-1)); @@ -6745,7 +6745,7 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints) for (size_t i = 0; i < ionScriptLabels_.length(); i++) { ionScriptLabels_[i].fixup(&masm); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]), ImmPtr(ionScript), ImmPtr((void*)-1)); } @@ -6783,14 +6783,14 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints) TraceLogger *logger = TraceLoggerForMainThread(cx->runtime()); for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) { patchableTraceLoggers_[i].fixup(&masm); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]), ImmPtr(logger), ImmPtr(nullptr)); } uint32_t scriptId = TraceLogCreateTextId(logger, script); for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) { patchableTLScripts_[i].fixup(&masm); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]), ImmPtr((void *) uintptr_t(scriptId)), ImmPtr((void *)0)); } diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp index 568e77850dcd..c85f1bbbefe6 100644 --- a/js/src/jit/Ion.cpp +++ b/js/src/jit/Ion.cpp @@ -2621,14 +2621,14 @@ InvalidateActivation(FreeOp *fop, uint8_t *jitTop, bool invalidateAll) CodeLocationLabel dataLabelToMunge(it.returnAddressToFp()); ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() - (it.returnAddressToFp() - ionCode->raw()); - Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta)); + Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta)); CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si); CodeLocationLabel invalidateEpilogue(ionCode, CodeOffsetLabel(ionScript->invalidateEpilogueOffset())); IonSpew(IonSpew_Invalidate, " ! 
Invalidate ionScript %p (ref %u) -> patching osipoint %p", ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw()); - Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue); + Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue); } IonSpew(IonSpew_Invalidate, "END invalidating activation"); diff --git a/js/src/jit/IonCaches.cpp b/js/src/jit/IonCaches.cpp index 01da291efeea..1ab7426f51e8 100644 --- a/js/src/jit/IonCaches.cpp +++ b/js/src/jit/IonCaches.cpp @@ -246,7 +246,7 @@ class IonCache::StubAttacher void patchStubCodePointer(MacroAssembler &masm, JitCode *code) { if (hasStubCodePatchOffset_) { stubCodePatchOffset_.fixup(&masm); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_), ImmPtr(code), STUB_ADDR); } } @@ -373,7 +373,7 @@ DispatchIonCache::updateBaseAddress(JitCode *code, MacroAssembler &masm) IonCache::updateBaseAddress(code, masm); dispatchLabel_.fixup(&masm); - Assembler::patchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_), + Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_), ImmPtr(&firstStub_), ImmPtr((void*)-1)); firstStub_ = fallbackLabel_.raw(); diff --git a/js/src/jit/IonCaches.h b/js/src/jit/IonCaches.h index 1d3ede8b692d..a0a7868207d3 100644 --- a/js/src/jit/IonCaches.h +++ b/js/src/jit/IonCaches.h @@ -375,7 +375,7 @@ class RepatchIonCache : public IonCache #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS) uint32_t i = 0; while (i < REJOIN_LABEL_OFFSET) - ptr = Assembler::nextInstruction(ptr, &i); + ptr = Assembler::NextInstruction(ptr, &i); #endif return CodeLocationLabel(ptr); } diff --git a/js/src/jit/IonFrames.cpp b/js/src/jit/IonFrames.cpp index fe6240a2d804..bbfcf34a4aa4 100644 --- a/js/src/jit/IonFrames.cpp +++ b/js/src/jit/IonFrames.cpp @@ -155,7 +155,7 @@ JitFrameIterator::checkInvalidation(IonScript **ionScriptOut) const int32_t invalidationDataOffset = ((int32_t *) returnAddr)[-1]; uint8_t *ionScriptDataOffset = returnAddr + invalidationDataOffset; - IonScript *ionScript = (IonScript *) Assembler::getPointer(ionScriptDataOffset); + IonScript *ionScript = (IonScript *) Assembler::GetPointer(ionScriptDataOffset); JS_ASSERT(ionScript->containsReturnAddress(returnAddr)); *ionScriptOut = ionScript; return true; @@ -1426,7 +1426,7 @@ OsiIndex::returnPointDisplacement() const // In general, pointer arithmetic on code is bad, but in this case, // getting the return address from a call instruction, stepping over pools // would be wrong. - return callPointDisplacement_ + Assembler::patchWrite_NearCallSize(); + return callPointDisplacement_ + Assembler::PatchWrite_NearCallSize(); } SnapshotIterator::SnapshotIterator(IonScript *ionScript, SnapshotOffset snapshotOffset, diff --git a/js/src/jit/IonMacroAssembler.h b/js/src/jit/IonMacroAssembler.h index 1be5c4f02638..a6462ee6d1af 100644 --- a/js/src/jit/IonMacroAssembler.h +++ b/js/src/jit/IonMacroAssembler.h @@ -902,7 +902,7 @@ class MacroAssembler : public MacroAssemblerSpecific // be unset if the code never needed to push its JitCode*. 
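// A note on the patching pattern used at these sites: code generation stores
// a sentinel immediate (typically (void*)-1 or nullptr) at each patchable
// location, and linking later swaps in the real pointer while asserting that
// the sentinel is still present. A minimal sketch of the idea, with a
// hypothetical name rather than the real SpiderMonkey entry point:
//
//   static void PatchPointerWithCheck(uint8_t *where, void *newValue, void *expected)
//   {
//       void **slot = reinterpret_cast<void **>(where);
//       MOZ_ASSERT(*slot == expected);  // catches double- or mis-patching
//       *slot = newValue;
//   }
//
// This is also why the DEBUG-only restoreToInitialState() earlier in this
// patch writes the -1 sentinel back: it keeps the expected-value assertion in
// staticallyLink valid across relinking.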
if (hasEnteredExitFrame()) { exitCodePatch_.fixup(this); - patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_), + PatchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_), ImmPtr(code), ImmPtr((void*)-1)); } diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h index 96a7d41b17c7..2495a2483b0b 100644 --- a/js/src/jit/LIR.h +++ b/js/src/jit/LIR.h @@ -1428,7 +1428,7 @@ class LSafepoint : public TempObject // In general, pointer arithmetic on code is bad, but in this case, // getting the return address from a call instruction, stepping over pools // would be wrong. - return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize(); + return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize(); } uint32_t osiCallPointOffset() const { return osiCallPointOffset_; diff --git a/js/src/jit/Safepoints.cpp b/js/src/jit/Safepoints.cpp index 33a4a718e019..aca7bcfe3318 100644 --- a/js/src/jit/Safepoints.cpp +++ b/js/src/jit/Safepoints.cpp @@ -397,7 +397,7 @@ SafepointReader::SafepointReader(IonScript *script, const SafepointIndex *si) uint32_t SafepointReader::osiReturnPointOffset() const { - return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize(); + return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize(); } CodeLocationLabel diff --git a/js/src/jit/arm/Architecture-arm.cpp b/js/src/jit/arm/Architecture-arm.cpp index bbf1841d0f5e..884f3fadf95e 100644 --- a/js/src/jit/arm/Architecture-arm.cpp +++ b/js/src/jit/arm/Architecture-arm.cpp @@ -160,9 +160,9 @@ uint32_t GetARMFlags() isSet = true; #if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__) // This should really be detected at runtime, but /proc/*/auxv - // doesn't seem to carry the ISA. We could look in - // /proc/cpuinfo as well, but the chances that it will be - // different from this are low. + // doesn't seem to carry the ISA. We could look in /proc/cpuinfo + // as well, but the chances that it will be different from this + // are low. flags |= HWCAP_ARMv7; #endif return flags; @@ -314,14 +314,14 @@ VFPRegister::ReduceSetForPush(const FloatRegisterSet &s) FloatRegisterSet mod; for (TypedRegisterIterator iter(s); iter.more(); iter++) { if ((*iter).isSingle()) { - // add in just this float + // Add in just this float. mod.addUnchecked(*iter); } else if ((*iter).id() < 16) { - // a double with an overlay, add in both floats + // A double with an overlay, add in both floats. mod.addUnchecked((*iter).singleOverlay(0)); mod.addUnchecked((*iter).singleOverlay(1)); } else { - // add in the lone double in the range 16-31 + // Add in the lone double in the range 16-31. mod.addUnchecked(*iter); } } diff --git a/js/src/jit/arm/Architecture-arm.h b/js/src/jit/arm/Architecture-arm.h index 8a0031398343..4cad367d0d27 100644 --- a/js/src/jit/arm/Architecture-arm.h +++ b/js/src/jit/arm/Architecture-arm.h @@ -14,7 +14,8 @@ #include "js/Utility.h" -// gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float target. +// Gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float +// target. #if defined(__ARM_PCS_VFP) #define JS_CODEGEN_ARM_HARDFP #endif @@ -38,11 +39,11 @@ static const uint32_t ShadowStackSpace = 0; // These offsets are related to bailouts. //// -// Size of each bailout table entry. On arm, this is presently -// a single call (which is wrong!). the call clobbers lr. -// For now, I've dealt with this by ensuring that we never allocate to lr. 
-it should probably be 8 bytes, a mov of an immediate into r12 (not -allocated presently, or ever) followed by a branch to the apropriate code. +// Size of each bailout table entry. On arm, this is presently a single call +// (which is wrong!). The call clobbers lr. +// For now, I've dealt with this by ensuring that we never allocate to lr. It +// should probably be 8 bytes, a mov of an immediate into r12 (not allocated +// presently, or ever) followed by a branch to the appropriate code. static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 4; class Registers { @@ -139,7 +140,7 @@ class Registers // Registers returned from a JS -> C call. static const uint32_t CallMask = (1 << Registers::r0) | - (1 << Registers::r1); // used for double-size returns + (1 << Registers::r1); // Used for double-size returns. static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask; typedef uint32_t SetType; @@ -240,9 +241,9 @@ class TypedRegisterSet; class VFPRegister { public: - // What type of data is being stored in this register? - // UInt / Int are specifically for vcvt, where we need - // to know how the data is supposed to be converted. + // What type of data is being stored in this register? UInt / Int are + // specifically for vcvt, where we need to know how the data is supposed to + // be converted. enum RegType { Single = 0x0, Double = 0x1, @@ -255,13 +256,11 @@ class VFPRegister protected: RegType kind : 2; - // ARM doesn't have more than 32 registers... - // don't take more bits than we'll need. - // Presently, I don't have plans to address the upper - // and lower halves of the double registers seprately, so - // 5 bits should suffice. If I do decide to address them seprately - // (vmov, I'm looking at you), I will likely specify it as a separate - // field. + // ARM doesn't have more than 32 registers. Don't take more bits than we'll + // need. Presently, we don't have plans to address the upper and lower + // halves of the double registers separately, so 5 bits should suffice. If we + // do decide to address them separately (vmov, I'm looking at you), we will + // likely specify it as a separate field. public: Code code_ : 5; protected: @@ -307,7 +306,7 @@ class VFPRegister struct VFPRegIndexSplit; VFPRegIndexSplit encode(); - // for serializing values + // For serializing values. struct VFPRegIndexSplit { const uint32_t block : 4; const uint32_t bit : 1; @@ -325,8 +324,8 @@ class VFPRegister Code code() const { JS_ASSERT(!_isInvalid && !_isMissing); - // this should only be used in areas where we only have doubles - // and singles. + // This should only be used in areas where we only have doubles and + // singles. JS_ASSERT(isFloat()); return Code(code_); } @@ -391,8 +390,8 @@ class VFPRegister } // | d0 | // | s0 | s1 | - // if we've stored s0 and s1 in memory, we also want to say that d0 - // is stored there, but it is only stored at the location where it is aligned + // If we've stored s0 and s1 in memory, we also want to say that d0 is + // stored there, but it is only stored at the location where it is aligned // e.g. at s0, not s1. void alignedAliased(uint32_t aliasIdx, VFPRegister *ret) { if (aliasIdx == 0) { @@ -424,8 +423,7 @@ class VFPRegister }; -// The only floating point register set that we work with -// are the VFP Registers +// The only floating point register set that we work with is the VFP Registers.
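// To make the aliasing described above concrete: with the 5-bit code_ field,
// double register dN (for N < 16) overlays single registers s(2N) and
// s(2N+1). A hedged sketch of the index math; it mirrors singleOverlay() in
// Assembler-arm.cpp later in this patch, but is illustrative rather than the
// literal implementation:
//
//   // which == 0 selects the low half (s2N), which == 1 the high (s2N+1).
//   static uint32_t SingleCodeOfDouble(uint32_t doubleCode, unsigned which)
//   {
//       return (doubleCode << 1) + which;
//   }
//
// d16-d31 have no single-precision overlays at all, which is why the overlay
// helpers assert code_ < 16 for the Double kind.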
typedef VFPRegister FloatRegister; uint32_t GetARMFlags(); @@ -435,16 +433,16 @@ bool HasVFP(); bool Has32DP(); bool HasIDIV(); -// Arm/D32 has double registers that can NOT be treated as float32 -// and this requires some dances in lowering. +// Arm/D32 has double registers that can NOT be treated as float32 and this +// requires some dances in lowering. inline bool hasUnaliasedDouble() { return Has32DP(); } -// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 -// to a double as a temporary, you need a temporary double register. +// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 to +// a double as a temporary, you need a temporary double register. inline bool hasMultiAlias() { @@ -453,8 +451,9 @@ hasMultiAlias() bool ParseARMHwCapFlags(const char *armHwCap); -// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is static -// and useHardFpABI is inlined so that unused branches can be optimized away. +// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is +// static and useHardFpABI is inlined so that unused branches can be optimized +// away. #if defined(JS_ARM_SIMULATOR) bool UseHardFpABI(); #else diff --git a/js/src/jit/arm/Assembler-arm.cpp b/js/src/jit/arm/Assembler-arm.cpp index 852eca610565..eb3681b7c8a7 100644 --- a/js/src/jit/arm/Assembler-arm.cpp +++ b/js/src/jit/arm/Assembler-arm.cpp @@ -22,8 +22,8 @@ using namespace js::jit; using mozilla::CountLeadingZeroes32; -// Note this is used for inter-AsmJS calls and may pass arguments and results -// in floating point registers even if the system ABI does not. +// Note this is used for inter-AsmJS calls and may pass arguments and results in +// floating point registers even if the system ABI does not. ABIArgGenerator::ABIArgGenerator() : intRegIndex_(0), floatRegIndex_(0), @@ -66,8 +66,8 @@ ABIArgGenerator::next(MIRType type) const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4; const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5; -// Encode a standard register when it is being used as src1, the dest, and -// an extra register. These should never be called with an InvalidReg. +// Encode a standard register when it is being used as src1, the dest, and an +// extra register. These should never be called with an InvalidReg. uint32_t js::jit::RT(Register r) { @@ -96,8 +96,8 @@ js::jit::RM(Register r) return r.code() << 8; } -// Encode a standard register when it is being used as src1, the dest, and -// an extra register. For these, an InvalidReg is used to indicate a optional +// Encode a standard register when it is being used as src1, the dest, and an +// extra register. For these, an InvalidReg is used to indicate an optional // register that has been omitted. uint32_t js::jit::maybeRT(Register r) @@ -132,7 +132,7 @@ js::jit::maybeRD(Register r) Register js::jit::toRD(Instruction &i) { - return Register::FromCode((i.encode()>>12) & 0xf); + return Register::FromCode((i.encode() >> 12) & 0xf); } Register js::jit::toR(Instruction &i) @@ -143,13 +143,13 @@ js::jit::toR(Instruction &i) Register js::jit::toRM(Instruction &i) { - return Register::FromCode((i.encode()>>8) & 0xf); + return Register::FromCode((i.encode() >> 8) & 0xf); } Register js::jit::toRN(Instruction &i) { - return Register::FromCode((i.encode()>>16) & 0xf); + return Register::FromCode((i.encode() >> 16) & 0xf); } uint32_t @@ -158,7 +158,7 @@ js::jit::VD(VFPRegister vr) if (vr.isMissing()) return 0; - //bits 15,14,13,12, 22 + // Bits 15,14,13,12, 22.
VFPRegister::VFPRegIndexSplit s = vr.encode(); return s.bit << 22 | s.block << 12; } @@ -168,7 +168,7 @@ js::jit::VN(VFPRegister vr) if (vr.isMissing()) return 0; - // bits 19,18,17,16, 7 + // Bits 19,18,17,16, 7. VFPRegister::VFPRegIndexSplit s = vr.encode(); return s.bit << 7 | s.block << 16; } @@ -178,7 +178,7 @@ js::jit::VM(VFPRegister vr) if (vr.isMissing()) return 0; - // bits 5, 3,2,1,0 + // Bits 5, 3,2,1,0. VFPRegister::VFPRegIndexSplit s = vr.encode(); return s.bit << 5 | s.block; } @@ -194,63 +194,63 @@ jit::VFPRegister::encode() case Single: return VFPRegIndexSplit(code_ >> 1, code_ & 1); default: - // vfp register treated as an integer, NOT a gpr + // VFP register treated as an integer, NOT a gpr. return VFPRegIndexSplit(code_ >> 1, code_ & 1); } } bool -InstDTR::isTHIS(const Instruction &i) +InstDTR::IsTHIS(const Instruction &i) { return (i.encode() & IsDTRMask) == (uint32_t)IsDTR; } InstDTR * -InstDTR::asTHIS(const Instruction &i) +InstDTR::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstDTR*)&i; return nullptr; } bool -InstLDR::isTHIS(const Instruction &i) +InstLDR::IsTHIS(const Instruction &i) { return (i.encode() & IsDTRMask) == (uint32_t)IsDTR; } InstLDR * -InstLDR::asTHIS(const Instruction &i) +InstLDR::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstLDR*)&i; return nullptr; } InstNOP * -InstNOP::asTHIS(Instruction &i) +InstNOP::AsTHIS(Instruction &i) { - if (isTHIS(i)) - return (InstNOP*) (&i); + if (IsTHIS(i)) + return (InstNOP*)&i; return nullptr; } bool -InstNOP::isTHIS(const Instruction &i) +InstNOP::IsTHIS(const Instruction &i) { return (i.encode() & 0x0fffffff) == NopInst; } bool -InstBranchReg::isTHIS(const Instruction &i) +InstBranchReg::IsTHIS(const Instruction &i) { - return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i); + return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i); } InstBranchReg * -InstBranchReg::asTHIS(const Instruction &i) +InstBranchReg::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstBranchReg*)&i; return nullptr; } @@ -266,15 +266,15 @@ InstBranchReg::checkDest(Register dest) } bool -InstBranchImm::isTHIS(const Instruction &i) +InstBranchImm::IsTHIS(const Instruction &i) { - return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i); + return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i); } InstBranchImm * -InstBranchImm::asTHIS(const Instruction &i) +InstBranchImm::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstBranchImm*)&i; return nullptr; } @@ -286,69 +286,69 @@ InstBranchImm::extractImm(BOffImm *dest) } bool -InstBXReg::isTHIS(const Instruction &i) +InstBXReg::IsTHIS(const Instruction &i) { return (i.encode() & IsBRegMask) == IsBX; } InstBXReg * -InstBXReg::asTHIS(const Instruction &i) +InstBXReg::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstBXReg*)&i; return nullptr; } bool -InstBLXReg::isTHIS(const Instruction &i) +InstBLXReg::IsTHIS(const Instruction &i) { return (i.encode() & IsBRegMask) == IsBLX; } InstBLXReg * -InstBLXReg::asTHIS(const Instruction &i) +InstBLXReg::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstBLXReg*)&i; return nullptr; } bool -InstBImm::isTHIS(const Instruction &i) +InstBImm::IsTHIS(const Instruction &i) { return (i.encode () & IsBImmMask) == IsB; } InstBImm * -InstBImm::asTHIS(const Instruction &i) +InstBImm::AsTHIS(const Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstBImm*)&i; return nullptr; } bool -InstBLImm::isTHIS(const 
Instruction &i) +InstBLImm::IsTHIS(const Instruction &i) { return (i.encode () & IsBImmMask) == IsBL; } InstBLImm * -InstBLImm::asTHIS(Instruction &i) +InstBLImm::AsTHIS(Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstBLImm*)&i; return nullptr; } bool -InstMovWT::isTHIS(Instruction &i) +InstMovWT::IsTHIS(Instruction &i) { - return InstMovW::isTHIS(i) || InstMovT::isTHIS(i); + return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i); } InstMovWT * -InstMovWT::asTHIS(Instruction &i) +InstMovWT::AsTHIS(Instruction &i) { - if (isTHIS(i)) + if (IsTHIS(i)) return (InstMovWT*)&i; return nullptr; } @@ -376,41 +376,41 @@ InstMovWT::checkDest(Register dest) } bool -InstMovW::isTHIS(const Instruction &i) +InstMovW::IsTHIS(const Instruction &i) { return (i.encode() & IsWTMask) == IsW; } InstMovW * -InstMovW::asTHIS(const Instruction &i) +InstMovW::AsTHIS(const Instruction &i) { - if (isTHIS(i)) - return (InstMovW*) (&i); + if (IsTHIS(i)) + return (InstMovW*)&i; return nullptr; } InstMovT * -InstMovT::asTHIS(const Instruction &i) +InstMovT::AsTHIS(const Instruction &i) { - if (isTHIS(i)) - return (InstMovT*) (&i); + if (IsTHIS(i)) + return (InstMovT*)&i; return nullptr; } bool -InstMovT::isTHIS(const Instruction &i) +InstMovT::IsTHIS(const Instruction &i) { return (i.encode() & IsWTMask) == IsT; } InstALU * -InstALU::asTHIS(const Instruction &i) +InstALU::AsTHIS(const Instruction &i) { - if (isTHIS(i)) - return (InstALU*) (&i); + if (IsTHIS(i)) + return (InstALU*)&i; return nullptr; } bool -InstALU::isTHIS(const Instruction &i) +InstALU::IsTHIS(const Instruction &i) { return (i.encode() & ALUMask) == 0; } @@ -453,31 +453,31 @@ InstALU::extractOp2() } InstCMP * -InstCMP::asTHIS(const Instruction &i) +InstCMP::AsTHIS(const Instruction &i) { - if (isTHIS(i)) - return (InstCMP*) (&i); + if (IsTHIS(i)) + return (InstCMP*)&i; return nullptr; } bool -InstCMP::isTHIS(const Instruction &i) +InstCMP::IsTHIS(const Instruction &i) { - return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp); + return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) && InstALU::AsTHIS(i)->checkOp(OpCmp); } InstMOV * -InstMOV::asTHIS(const Instruction &i) +InstMOV::AsTHIS(const Instruction &i) { - if (isTHIS(i)) - return (InstMOV*) (&i); + if (IsTHIS(i)) + return (InstMOV*)&i; return nullptr; } bool -InstMOV::isTHIS(const Instruction &i) +InstMOV::IsTHIS(const Instruction &i) { - return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov); + return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) && InstALU::AsTHIS(i)->checkOp(OpMov); } Op2Reg @@ -514,8 +514,9 @@ Imm16::Imm16() void jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label) { - // We need to determine if this jump can fit into the standard 24+2 bit address - // or if we need a larger branch (or just need to use our pool entry) + // We need to determine if this jump can fit into the standard 24+2 bit + // address or if we need a larger branch (or just need to use our pool + // entry). Instruction *jump = (Instruction*)jump_.raw(); // jumpWithPatch() returns the offset of the jump and never a pool or nop. 
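// For orientation: an ARM B/BL instruction carries a signed 24-bit *word*
// offset, so a near branch reaches roughly +/-32MB of the branch site. A
// hedged sketch of the range test that BOffImm::IsInRange performs
// (illustrative, not the literal implementation):
//
//   static bool NearBranchInRange(int byteOffset)
//   {
//       return (byteOffset & 0x3) == 0 &&      // must be word aligned
//              byteOffset >= -(1 << 25) &&     // -(1 << 23) words
//              byteOffset <= (1 << 25) - 4;    // +(1 << 23) - 1 words
//   }
//
// A target outside that window forces the demotion to an ldr through the
// jump-table entry below.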
Assembler::Condition c; @@ -523,13 +524,14 @@ jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label) JS_ASSERT(jump->is() || jump->is()); int jumpOffset = label.raw() - jump_.raw(); - if (BOffImm::isInRange(jumpOffset)) { - // This instruction started off as a branch, and will remain one - Assembler::retargetNearBranch(jump, jumpOffset, c); + if (BOffImm::IsInRange(jumpOffset)) { + // This instruction started off as a branch, and will remain one. + Assembler::RetargetNearBranch(jump, jumpOffset, c); } else { - // This instruction started off as a branch, but now needs to be demoted to an ldr. + // This instruction started off as a branch, but now needs to be demoted + // to an ldr. uint8_t **slot = reinterpret_cast(jump_.jumpTableEntry()); - Assembler::retargetFarBranch(jump, slot, label.raw(), c); + Assembler::RetargetFarBranch(jump, slot, label.raw(), c); } } @@ -601,7 +603,7 @@ Assembler::actualOffset(BufferOffset off_) const class RelocationIterator { CompactBufferReader reader_; - // offset in bytes + // Offset in bytes. uint32_t offset_; public: @@ -623,7 +625,7 @@ class RelocationIterator template const uint32_t * -Assembler::getCF32Target(Iter *iter) +Assembler::GetCF32Target(Iter *iter) { Instruction *inst1 = iter->cur(); Instruction *inst2 = iter->next(); @@ -631,7 +633,7 @@ Assembler::getCF32Target(Iter *iter) Instruction *inst4 = iter->next(); if (inst1->is()) { - // see if we have a simple case, b #offset + // See if we have a simple case, b #offset. BOffImm imm; InstBranchImm *jumpB = inst1->as(); jumpB->extractImm(&imm); @@ -641,15 +643,15 @@ Assembler::getCF32Target(Iter *iter) if (inst1->is() && inst2->is() && (inst3->is() || inst3->is() || inst4->is())) { - // see if we have the complex case, - // movw r_temp, #imm1 - // movt r_temp, #imm2 - // bx r_temp + // See if we have the complex case: + // movw r_temp, #imm1 + // movt r_temp, #imm2 + // bx r_temp // OR - // movw r_temp, #imm1 - // movt r_temp, #imm2 - // str pc, [sp] - // bx r_temp + // movw r_temp, #imm1 + // movt r_temp, #imm2 + // str pc, [sp] + // bx r_temp Imm16 targ_bot; Imm16 targ_top; @@ -669,8 +671,8 @@ Assembler::getCF32Target(Iter *iter) // Make sure we're branching to the same register. #ifdef DEBUG - // A toggled call sometimes has a NOP instead of a branch for the third instruction. - // No way to assert that it's valid in that situation. + // A toggled call sometimes has a NOP instead of a branch for the third + // instruction. No way to assert that it's valid in that situation. if (!inst3->is()) { InstBranchReg *realBranch = inst3->is() ? inst3->as() : inst4->as(); @@ -685,7 +687,7 @@ Assembler::getCF32Target(Iter *iter) if (inst1->is()) { InstLDR *load = inst1->as(); uint32_t inst = load->encode(); - // get the address of the instruction as a raw pointer + // Get the address of the instruction as a raw pointer. 
char *dataInst = reinterpret_cast(load); IsUp_ iu = IsUp_(inst & IsUp); int32_t offset = inst & 0xfff; @@ -701,24 +703,24 @@ Assembler::getCF32Target(Iter *iter) } uintptr_t -Assembler::getPointer(uint8_t *instPtr) +Assembler::GetPointer(uint8_t *instPtr) { InstructionIterator iter((Instruction*)instPtr); - uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr); + uintptr_t ret = (uintptr_t)GetPtr32Target(&iter, nullptr, nullptr); return ret; } template const uint32_t * -Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style) +Assembler::GetPtr32Target(Iter *start, Register *dest, RelocStyle *style) { Instruction *load1 = start->cur(); Instruction *load2 = start->next(); if (load1->is() && load2->is()) { - // see if we have the complex case, - // movw r_temp, #imm1 - // movt r_temp, #imm2 + // See if we have the complex case: + // movw r_temp, #imm1 + // movt r_temp, #imm2 Imm16 targ_bot; Imm16 targ_top; @@ -747,7 +749,7 @@ Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style) if (load1->is()) { InstLDR *load = load1->as(); uint32_t inst = load->encode(); - // get the address of the instruction as a raw pointer + // Get the address of the instruction as a raw pointer. char *dataInst = reinterpret_cast(load); IsUp_ iu = IsUp_(inst & IsUp); int32_t offset = inst & 0xfff; @@ -760,13 +762,14 @@ Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style) uint32_t **ptr = (uint32_t **)&dataInst[offset + 8]; return *ptr; } + MOZ_ASSUME_UNREACHABLE("unsupported relocation"); } static JitCode * CodeFromJump(InstructionIterator *jump) { - uint8_t *target = (uint8_t *)Assembler::getCF32Target(jump); + uint8_t *target = (uint8_t *)Assembler::GetCF32Target(jump); return JitCode::FromExecutable(target); } @@ -787,7 +790,7 @@ TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader while (reader.more()) { size_t offset = reader.readUnsigned(); InstructionIterator iter((Instruction*)(buffer + offset)); - void *ptr = const_cast(Assembler::getPtr32Target(&iter)); + void *ptr = const_cast(Assembler::GetPtr32Target(&iter)); // No barrier needed since these are constants. gc::MarkGCThingUnbarriered(trc, reinterpret_cast(&ptr), "ion-masm-ptr"); } @@ -800,7 +803,7 @@ TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer, for (unsigned int idx = 0; idx < locs->length(); idx++) { BufferOffset bo = (*locs)[idx]; ARMBuffer::AssemblerBufferInstIterator iter(bo, buffer); - void *ptr = const_cast(Assembler::getPtr32Target(&iter)); + void *ptr = const_cast(Assembler::GetPtr32Target(&iter)); // No barrier needed since these are constants. gc::MarkGCThingUnbarriered(trc, reinterpret_cast(&ptr), "ion-masm-ptr"); @@ -864,8 +867,8 @@ Assembler::writeCodePointer(AbsoluteLabel *absoluteLabel) { JS_ASSERT(!absoluteLabel->bound()); BufferOffset off = writeInst(LabelBase::INVALID_OFFSET); - // x86/x64 makes general use of AbsoluteLabel and weaves a linked list of - // uses of an AbsoluteLabel through the assembly. ARM only uses labels + // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list + // of uses of an AbsoluteLabel through the assembly. ARM only uses labels // for the case statements of switch jump tables. Thus, for simplicity, we // simply treat the AbsoluteLabel as a label and bind it to the offset of // the jump table entry that needs to be patched. 
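A short aside on the movw/movt case handled in GetPtr32Target and GetCF32Target
above: the pair materializes a 32-bit value sixteen bits at a time, so
recovering the target is just a matter of splicing the two imm16 fields back
together. A minimal, self-contained sketch (the helper name is hypothetical,
not part of this patch):

    #include <stdint.h>

    // movw writes the low half of the register, movt the high half, so the
    // pointer the pair materializes is the concatenation of the two fields.
    static uint32_t SpliceMovwMovt(uint16_t movwImm, uint16_t movtImm)
    {
        return (uint32_t(movtImm) << 16) | movwImm;
    }

The decoders above are, in effect, this splice plus the bookkeeping needed to
find the two instructions and validate that they target the same register.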
@@ -889,26 +892,27 @@ Assembler::InvertCondition(Condition cond) } Imm8::TwoImm8mData -Imm8::encodeTwoImms(uint32_t imm) +Imm8::EncodeTwoImms(uint32_t imm) { - // In the ideal case, we are looking for a number that (in binary) looks like: - // 0b((00)*)n_1((00)*)n_2((00)*) - // left n1 mid n2 - // where both n_1 and n_2 fit into 8 bits. - // since this is being done with rotates, we also need to handle the case + // In the ideal case, we are looking for a number that (in binary) looks + // like: + // 0b((00)*)n_1((00)*)n_2((00)*) + // left n1 mid n2 + // where both n_1 and n_2 fit into 8 bits. + // Since this is being done with rotates, we also need to handle the case // that one of these numbers is in fact split between the left and right // sides, in which case the constant will look like: - // 0bn_1a((00)*)n_2((00)*)n_1b - // n1a mid n2 rgh n1b - // also remember, values are rotated by multiples of two, and left, - // mid or right can have length zero + // 0bn_1a((00)*)n_2((00)*)n_1b + // n1a mid n2 rgh n1b + // Also remember, values are rotated by multiples of two, and left, mid or + // right can have length zero. uint32_t imm1, imm2; int left = CountLeadingZeroes32(imm) & 0x1E; uint32_t no_n1 = imm & ~(0xff << (24 - left)); - // not technically needed: this case only happens if we can encode - // as a single imm8m. There is a perfectly reasonable encoding in this - // case, but we shouldn't encourage people to do things like this. + // Not technically needed: this case only happens if we can encode as a + // single imm8m. There is a perfectly reasonable encoding in this case, but + // we shouldn't encourage people to do things like this. if (no_n1 == 0) return TwoImm8mData(); @@ -916,16 +920,16 @@ Imm8::encodeTwoImms(uint32_t imm) uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f)); if (no_n2 == 0) { - // we hit the easy case, no wraparound. - // note: a single constant *may* look like this. + // We hit the easy case, no wraparound. + // Note: a single constant *may* look like this. int imm1shift = left + 8; int imm2shift = mid + 8; imm1 = (imm >> (32 - imm1shift)) & 0xff; if (imm2shift >= 32) { imm2shift = 0; - // this assert does not always hold - //assert((imm & 0xff) == no_n1); - // in fact, this would lead to some incredibly subtle bugs. + // This assert does not always hold, in fact, this would lead to + // some incredibly subtle bugs. + // assert((imm & 0xff) == no_n1); imm2 = no_n1; } else { imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff; @@ -938,42 +942,41 @@ Imm8::encodeTwoImms(uint32_t imm) datastore::Imm8mData(imm2, imm2shift >> 1)); } - // either it wraps, or it does not fit. - // if we initially chopped off more than 8 bits, then it won't fit. + // Either it wraps, or it does not fit. If we initially chopped off more + // than 8 bits, then it won't fit. if (left >= 8) return TwoImm8mData(); int right = 32 - (CountLeadingZeroes32(no_n2) & 30); - // all remaining set bits *must* fit into the lower 8 bits - // the right == 8 case should be handled by the previous case. + // All remaining set bits *must* fit into the lower 8 bits. + // The right == 8 case should be handled by the previous case. if (right > 8) return TwoImm8mData(); - // make sure the initial bits that we removed for no_n1 - // fit into the 8-(32-right) leftmost bits - if (((imm & (0xff << (24 - left))) << (8-right)) != 0) { + // Make sure the initial bits that we removed for no_n1 fit into the + // 8-(32-right) leftmost bits. 
+ if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) { // BUT we may have removed more bits than we needed to for no_n1 - // 0x04104001 e.g. we can encode 0x104 with a single op, then - // 0x04000001 with a second, but we try to encode 0x0410000 - // and find that we need a second op for 0x4000, and 0x1 cannot - // be included in the encoding of 0x04100000 - no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right))); + // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001 + // with a second, but we try to encode 0x0410000 and find that we need a + // second op for 0x4000, and 0x1 cannot be included in the encoding of + // 0x04100000. + no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right))); mid = CountLeadingZeroes32(no_n1) & 30; - no_n2 = - no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31)); + no_n2 = no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31)); if (no_n2 != 0) return TwoImm8mData(); } - // now assemble all of this information into a two coherent constants - // it is a rotate right from the lower 8 bits. + // Now assemble all of this information into two coherent constants. It is + // a rotate right from the lower 8 bits. int imm1shift = 8 - right; imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift))); - JS_ASSERT ((imm1shift&~0x1e) == 0); + JS_ASSERT ((imm1shift & ~0x1e) == 0); // left + 8 + mid is the position of the leftmost bit of n_2. - // we needed to rotate 0x000000ab right by 8 in order to get - // 0xab000000, then shift again by the leftmost bit in order to - // get the constant that we care about. + // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000, + // then shift again by the leftmost bit in order to get the constant that we + // care about. int imm2shift = mid + 8; imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff; JS_ASSERT((imm1shift & 0x1) == 0); @@ -985,60 +988,60 @@ ALUOp jit::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest) { - // find an alternate ALUOp to get the job done, and use a different imm. + // Find an alternate ALUOp to get the job done, and use a different imm. *negDest = dest; switch (op) { - case op_mov: + case OpMov: *imm = Imm32(~imm->value); - return op_mvn; - case op_mvn: + return OpMvn; + case OpMvn: *imm = Imm32(~imm->value); - return op_mov; - case op_and: + return OpMov; + case OpAnd: *imm = Imm32(~imm->value); - return op_bic; - case op_bic: + return OpBic; + case OpBic: *imm = Imm32(~imm->value); - return op_and; - case op_add: + return OpAnd; + case OpAdd: *imm = Imm32(-imm->value); - return op_sub; - case op_sub: + return OpSub; + case OpSub: *imm = Imm32(-imm->value); - return op_add; - case op_cmp: + return OpAdd; + case OpCmp: *imm = Imm32(-imm->value); - return op_cmn; - case op_cmn: + return OpCmn; + case OpCmn: *imm = Imm32(-imm->value); - return op_cmp; - case op_tst: + return OpCmp; + case OpTst: JS_ASSERT(dest == InvalidReg); *imm = Imm32(~imm->value); *negDest = ScratchRegister; - return op_bic; + return OpBic; // orr has orn on thumb2 only. default: - return op_invalid; + return OpInvalid; } } bool jit::can_dbl(ALUOp op) { - // some instructions can't be processed as two separate instructions - // such as and, and possibly add (when we're setting ccodes). - // there is also some hilarity with *reading* condition codes.
- for example, adc dest, src1, 0xfff; (add with carry) can be split up - // into adc dest, src1, 0xf00; add dest, dest, 0xff, since "reading" the - // condition code increments the result by one conditionally, that only needs - // to be done on one of the two instructions. + // Some instructions can't be processed as two separate instructions such as + // and, and possibly add (when we're setting ccodes). There is also some + // hilarity with *reading* condition codes. For example, adc dest, src1, + // 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add + // dest, dest, 0xff, since "reading" the condition code increments the + // result by one conditionally; that only needs to be done on one of the two + // instructions. switch (op) { - case op_bic: - case op_add: - case op_sub: - case op_eor: - case op_orr: + case OpBic: + case OpAdd: + case OpSub: + case OpEor: + case OpOrr: return true; default: return false; @@ -1047,26 +1050,25 @@ jit::can_dbl(ALUOp op) bool jit::condsAreSafe(ALUOp op) { - // Even when we are setting condition codes, sometimes we can - // get away with splitting an operation into two. - // for example, if our immediate is 0x00ff00ff, and the operation is eors - // we can split this in half, since x ^ 0x00ff0000 ^ 0x000000ff should - // set all of its condition codes exactly the same as x ^ 0x00ff00ff. - // However, if the operation were adds, - // we cannot split this in half. If the source on the add is - // 0xfff00ff0, the result sholud be 0xef10ef, but do we set the overflow bit - // or not? Depending on which half is performed first (0x00ff0000 - // or 0x000000ff) the V bit will be set differently, and *not* updating - // the V bit would be wrong. Theoretically, the following should work - // adds r0, r1, 0x00ff0000; - // addsvs r0, r1, 0x000000ff; - // addvc r0, r1, 0x000000ff; - // but this is 3 instructions, and at that point, we might as well use + // Even when we are setting condition codes, sometimes we can get away with + // splitting an operation into two. For example, if our immediate is + // 0x00ff00ff, and the operation is eors we can split this in half, since x + // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly + // the same as x ^ 0x00ff00ff. However, if the operation were adds, we + // cannot split this in half. If the source on the add is 0xfff00ff0, the + // result should be 0xef10ef, but do we set the overflow bit or not? + // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the + // V bit will be set differently, and *not* updating the V bit would be + // wrong. Theoretically, the following should work: + // adds r0, r1, 0x00ff0000; + // addsvs r0, r1, 0x000000ff; + // addvc r0, r1, 0x000000ff; + // But this is 3 instructions, and at that point, we might as well use // something else. switch(op) { - case op_bic: - case op_orr: - case op_eor: + case OpBic: + case OpOrr: + case OpEor: return true; default: return false; @@ -1076,17 +1078,17 @@ jit::condsAreSafe(ALUOp op) { ALUOp jit::getDestVariant(ALUOp op) { - // all of the compare operations are dest-less variants of a standard - // operation. Given the dest-less variant, return the dest-ful variant. + // All of the compare operations are dest-less variants of a standard + // operation. Given the dest-less variant, return the dest-ful variant.
switch (op) { - case op_cmp: - return op_sub; - case op_cmn: - return op_add; - case op_tst: - return op_and; - case op_teq: - return op_eor; + case OpCmp: + return OpSub; + case OpCmn: + return OpAdd; + case OpTst: + return OpAnd; + case OpTeq: + return OpEor; default: return op; } } @@ -1158,7 +1160,7 @@ jit::asr (Register r, Register amt) static js::jit::DoubleEncoder doubleEncoder; -/* static */ const js::jit::VFPImm js::jit::VFPImm::one(0x3FF00000); +/* static */ const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000); js::jit::VFPImm::VFPImm(uint32_t top) { @@ -1176,13 +1178,13 @@ BOffImm::BOffImm(Instruction &inst) Instruction * BOffImm::getDest(Instruction *src) { - // TODO: It is probably worthwhile to verify that src is actually a branch + // TODO: It is probably worthwhile to verify that src is actually a branch. // NOTE: This does not explicitly shift the offset of the destination left by 2, // since it is indexing into an array of instruction sized objects. - return &src[(((int32_t)data<<8)>>8) + 2]; + return &src[(((int32_t)data << 8) >> 8) + 2]; } -//VFPRegister implementation +// VFPRegister implementation VFPRegister VFPRegister::doubleOverlay(unsigned int which) const { @@ -1196,7 +1198,7 @@ VFPRegister::singleOverlay(unsigned int which) const { JS_ASSERT(!_isInvalid); if (kind == Double) { - // There are no corresponding float registers for d16-d31 + // There are no corresponding float registers for d16-d31. JS_ASSERT(code_ < 16); JS_ASSERT(which < 2); return VFPRegister((code_ << 1) + which, Single); @@ -1210,7 +1212,7 @@ VFPRegister::sintOverlay(unsigned int which) const { JS_ASSERT(!_isInvalid); if (kind == Double) { - // There are no corresponding float registers for d16-d31 + // There are no corresponding float registers for d16-d31. JS_ASSERT(code_ < 16); JS_ASSERT(which < 2); return VFPRegister((code_ << 1) + which, Int); @@ -1223,7 +1225,7 @@ VFPRegister::uintOverlay(unsigned int which) const { JS_ASSERT(!_isInvalid); if (kind == Double) { - // There are no corresponding float registers for d16-d31 + // There are no corresponding float registers for d16-d31. JS_ASSERT(code_ < 16); JS_ASSERT(which < 2); return VFPRegister((code_ << 1) + which, UInt); @@ -1262,9 +1264,9 @@ Assembler::addCodeLabel(CodeLabel label) return codeLabels_.append(label); } -// Size of the instruction stream, in bytes. Including pools. This function expects -// all pools that need to be placed have been placed. If they haven't then we -// need to go an flush the pools :( +// Size of the instruction stream, in bytes. Including pools. This function +// expects that all pools that need to be placed have been placed. If they +// haven't then we need to go and flush the pools :( size_t Assembler::size() const { @@ -1298,14 +1300,14 @@ Assembler::bytesNeeded() const preBarrierTableBytes(); } -// write a blob of binary into the instruction stream +// Write a blob of binary into the instruction stream.
BufferOffset Assembler::writeInst(uint32_t x, uint32_t *dest) { if (dest == nullptr) return m_buffer.putInt(x); - writeInstStatic(x, dest); + WriteInstStatic(x, dest); return BufferOffset(); } BufferOffset @@ -1314,7 +1316,7 @@ Assembler::writeBranchInst(uint32_t x) return m_buffer.putInt(x, /* markAsBranch = */ true); } void -Assembler::writeInstStatic(uint32_t x, uint32_t *dest) +Assembler::WriteInstStatic(uint32_t x, uint32_t *dest) { JS_ASSERT(dest != nullptr); *dest = x; @@ -1342,6 +1344,7 @@ Assembler::align(int alignment) return ret; } + BufferOffset Assembler::as_nop() { @@ -1359,94 +1362,94 @@ Assembler::as_alu(Register dest, Register src1, Operand2 op2, BufferOffset Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest) { - return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest); + return as_alu(dest, InvalidReg, op2, OpMov, sc, c, instdest); } BufferOffset Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, InvalidReg, op2, op_mvn, sc, c); + return as_alu(dest, InvalidReg, op2, OpMvn, sc, c); } // Logical operations. BufferOffset Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_and, sc, c); + return as_alu(dest, src1, op2, OpAnd, sc, c); } BufferOffset Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_bic, sc, c); + return as_alu(dest, src1, op2, OpBic, sc, c); } BufferOffset Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_eor, sc, c); + return as_alu(dest, src1, op2, OpEor, sc, c); } BufferOffset Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_orr, sc, c); + return as_alu(dest, src1, op2, OpOrr, sc, c); } // Mathematical operations. BufferOffset Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_adc, sc, c); + return as_alu(dest, src1, op2, OpAdc, sc, c); } BufferOffset Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_add, sc, c); + return as_alu(dest, src1, op2, OpAdd, sc, c); } BufferOffset Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_sbc, sc, c); + return as_alu(dest, src1, op2, OpSbc, sc, c); } BufferOffset Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_sub, sc, c); + return as_alu(dest, src1, op2, OpSub, sc, c); } BufferOffset Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_rsb, sc, c); + return as_alu(dest, src1, op2, OpRsb, sc, c); } BufferOffset Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) { - return as_alu(dest, src1, op2, op_rsc, sc, c); + return as_alu(dest, src1, op2, OpRsc, sc, c); } // Test operations. 
BufferOffset Assembler::as_cmn(Register src1, Operand2 op2, Condition c) { - return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c); + return as_alu(InvalidReg, src1, op2, OpCmn, SetCond, c); } BufferOffset Assembler::as_cmp(Register src1, Operand2 op2, Condition c) { - return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c); + return as_alu(InvalidReg, src1, op2, OpCmp, SetCond, c); } BufferOffset Assembler::as_teq(Register src1, Operand2 op2, Condition c) { - return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c); + return as_alu(InvalidReg, src1, op2, OpTeq, SetCond, c); } BufferOffset Assembler::as_tst(Register src1, Operand2 op2, Condition c) { - return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c); + return as_alu(InvalidReg, src1, op2, OpTst, SetCond, c); } -// Not quite ALU worthy, but useful none the less: -// These also have the isue of these being formatted -// completly differently from the standard ALU operations. +// Not quite ALU worthy, but these are useful nonetheless. These also have +// the issue of being formatted completely differently from the standard ALU +// operations. BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c, Instruction *pos) { @@ -1472,51 +1475,51 @@ Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn, BufferOffset Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c) { - return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c); + return as_genmul(dest, InvalidReg, src1, src2, OpmMul, sc, c); } BufferOffset Assembler::as_mla(Register dest, Register acc, Register src1, Register src2, SetCond_ sc, Condition c) { - return as_genmul(dest, acc, src1, src2, opm_mla, sc, c); + return as_genmul(dest, acc, src1, src2, OpmMla, sc, c); } BufferOffset Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c) { - return as_genmul(destHI, destLO, src1, src2, opm_umaal, NoSetCond, c); + return as_genmul(destHI, destLO, src1, src2, OpmUmaal, NoSetCond, c); } BufferOffset Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c) { - return as_genmul(dest, acc, src1, src2, opm_mls, NoSetCond, c); + return as_genmul(dest, acc, src1, src2, OpmMls, NoSetCond, c); } BufferOffset Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2, SetCond_ sc, Condition c) { - return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c); + return as_genmul(destHI, destLO, src1, src2, OpmUmull, sc, c); } BufferOffset Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2, SetCond_ sc, Condition c) { - return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c); + return as_genmul(destHI, destLO, src1, src2, OpmUmlal, sc, c); } BufferOffset Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2, SetCond_ sc, Condition c) { - return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c); + return as_genmul(destHI, destLO, src1, src2, OpmSmull, sc, c); } BufferOffset Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2, SetCond_ sc, Condition c) { - return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c); + return as_genmul(destHI, destLO, src1, src2, OpmSmlal, sc, c); } BufferOffset @@ -1531,9 +1534,8 @@ Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c) return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code()); } -// Data transfer instructions: ldr, str, ldrb, strb.
-// Using an int to differentiate between 8 bits and 32 bits is -// overkill, but meh +// Data transfer instructions: ldr, str, ldrb, strb. Using an int to +// differentiate between 8 bits and 32 bits is overkill, but meh. BufferOffset Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt, DTRAddr addr, Condition c, uint32_t *dest) @@ -1547,82 +1549,83 @@ Assembler::as_dtr(LoadStore ls, int size, Index mode, class PoolHintData { public: enum LoadType { - // set 0 to bogus, since that is the value most likely to be + // Set 0 to bogus, since that is the value most likely to be // accidentally left somewhere. - poolBOGUS = 0, - poolDTR = 1, - poolBranch = 2, - poolVDTR = 3 + PoolBOGUS = 0, + PoolDTR = 1, + PoolBranch = 2, + PoolVDTR = 3 }; private: - uint32_t index : 16; - uint32_t cond : 4; - LoadType loadType : 2; - uint32_t destReg : 5; - uint32_t destType : 1; + uint32_t index_ : 16; + uint32_t cond_ : 4; + LoadType loadType_ : 2; + uint32_t destReg_ : 5; + uint32_t destType_ : 1; uint32_t ONES : 4; - static const uint32_t expectedOnes = 0xfu; + static const uint32_t ExpectedOnes = 0xfu; public: - void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, Register destReg_) { - index = index_; - JS_ASSERT(index == index_); - cond = cond_ >> 28; - JS_ASSERT(cond == cond_ >> 28); - loadType = lt; - ONES = expectedOnes; - destReg = destReg_.code(); - destType = 0; + void init(uint32_t index, Assembler::Condition cond, LoadType lt, Register destReg) { + index_ = index; + JS_ASSERT(index_ == index); + cond_ = cond >> 28; + JS_ASSERT(cond_ == cond >> 28); + loadType_ = lt; + ONES = ExpectedOnes; + destReg_ = destReg.code(); + destType_ = 0; } - void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const VFPRegister &destReg_) { - JS_ASSERT(destReg_.isFloat()); - index = index_; - JS_ASSERT(index == index_); - cond = cond_ >> 28; - JS_ASSERT(cond == cond_ >> 28); - loadType = lt; - ONES = expectedOnes; - destReg = destReg_.isDouble() ? destReg_.code() : destReg_.doubleOverlay().code(); - destType = destReg_.isDouble(); + void init(uint32_t index, Assembler::Condition cond, LoadType lt, const VFPRegister &destReg) { + JS_ASSERT(destReg.isFloat()); + index_ = index; + JS_ASSERT(index_ == index); + cond_ = cond >> 28; + JS_ASSERT(cond_ == cond >> 28); + loadType_ = lt; + ONES = ExpectedOnes; + destReg_ = destReg.isDouble() ? destReg.code() : destReg.doubleOverlay().code(); + destType_ = destReg.isDouble(); } Assembler::Condition getCond() { - return Assembler::Condition(cond << 28); + return Assembler::Condition(cond_ << 28); } Register getReg() { - return Register::FromCode(destReg); + return Register::FromCode(destReg_); } VFPRegister getVFPReg() { - VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg)); - return destType ? r : r.singleOverlay(); + VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg_)); + return destType_ ? 
r : r.singleOverlay(); } int32_t getIndex() { - return index; + return index_; } - void setIndex(uint32_t index_) { - JS_ASSERT(ONES == expectedOnes && loadType != poolBOGUS); - index = index_; - JS_ASSERT(index == index_); + void setIndex(uint32_t index) { + JS_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS); + index_ = index; + JS_ASSERT(index_ == index); } LoadType getLoadType() { - // If this *was* a poolBranch, but the branch has already been bound + // If this *was* a PoolBranch, but the branch has already been bound // then this isn't going to look like a real poolhintdata, but we still // want to lie about it so everyone knows it *used* to be a branch. - if (ONES != expectedOnes) - return PoolHintData::poolBranch; - return loadType; + if (ONES != ExpectedOnes) + return PoolHintData::PoolBranch; + return loadType_; } bool isValidPoolHint() { - // Most instructions cannot have a condition that is 0xf. Notable exceptions are - // blx and the entire NEON instruction set. For the purposes of pool loads, and - // possibly patched branches, the possible instructions are ldr and b, neither of - // which can have a condition code of 0xf. - return ONES == expectedOnes; + // Most instructions cannot have a condition that is 0xf. Notable + // exceptions are blx and the entire NEON instruction set. For the + // purposes of pool loads, and possibly patched branches, the possible + // instructions are ldr and b, neither of which can have a condition + // code of 0xf. + return ONES == ExpectedOnes; } }; @@ -1631,9 +1634,8 @@ union PoolHintPun { uint32_t raw; }; -// Handles all of the other integral data transferring functions: -// ldrsb, ldrsh, ldrd, etc. -// size is given in bits. +// Handles all of the other integral data transferring functions: ldrsb, ldrsh, +// ldrd, etc. The size is given in bits. BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode, Register rt, EDtrAddr addr, Condition c, uint32_t *dest) @@ -1643,13 +1645,13 @@ Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode, switch(size) { case 8: JS_ASSERT(IsSigned); - JS_ASSERT(ls!=IsStore); + JS_ASSERT(ls != IsStore); extra_bits1 = 0x1; extra_bits2 = 0x2; break; case 16: - //case 32: - // doesn't need to be handled-- it is handled by the default ldr/str + // 'case 32' doesn't need to be handled, it is handled by the default + // ldr/str. extra_bits2 = 0x01; extra_bits1 = (ls == IsStore) ? 0 : 1; if (IsSigned) { @@ -1680,7 +1682,7 @@ BufferOffset Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c) { PoolHintPun php; - php.phd.init(0, c, PoolHintData::poolDTR, dest); + php.phd.init(0, c, PoolHintData::PoolDTR, dest); return m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value); } @@ -1703,11 +1705,11 @@ BufferOffset Assembler::as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe, Condition c) { PoolHintPun php; - php.phd.init(0, c, PoolHintData::poolBranch, pc); + php.phd.init(0, c, PoolHintData::PoolBranch, pc); BufferOffset ret = m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value, pe, /* markAsBranch = */ true); - // If this label is already bound, then immediately replace the stub load with - // a correct branch. + // If this label is already bound, then immediately replace the stub load + // with a correct branch. 
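// (For orientation: the php.raw word written at these pool sites is the
// PoolHintData laid out above. Its fields are sized to fill exactly one
// 32-bit instruction slot: 16 (index_) + 4 (cond_) + 2 (loadType_) +
// 5 (destReg_) + 1 (destType_) + 4 (ONES) = 32 bits. Assuming the usual
// little-endian bit-field allocation, ONES lands in the top four bits -- the
// condition field of a real instruction -- and 0xf is a condition that
// neither ldr nor b can carry, which is what lets isValidPoolHint() tell a
// hint apart from an already-patched instruction.)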
if (label->bound()) { BufferOffset dest(label); as_b(dest.diffB(ret), c, ret); @@ -1722,7 +1724,7 @@ Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c) { JS_ASSERT(dest.isDouble()); PoolHintPun php; - php.phd.init(0, c, PoolHintData::poolVDTR, dest); + php.phd.init(0, c, PoolHintData::PoolVDTR, dest); return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&value); } @@ -1736,21 +1738,19 @@ JS_STATIC_ASSERT(sizeof(PaddedFloat32) == sizeof(double)); BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c) { - /* - * Insert floats into the double pool as they have the same limitations on - * immediate offset. This wastes 4 bytes padding per float. An alternative - * would be to have a separate pool for floats. - */ + // Insert floats into the double pool as they have the same limitations on + // immediate offset. This wastes 4 bytes padding per float. An alternative + // would be to have a separate pool for floats. JS_ASSERT(dest.isSingle()); PoolHintPun php; - php.phd.init(0, c, PoolHintData::poolVDTR, dest); + php.phd.init(0, c, PoolHintData::PoolVDTR, dest); PaddedFloat32 pf = { value, 0 }; return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&pf); } // Pool callbacks stuff: void -Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token) +Assembler::InsertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token) { uint32_t *load = (uint32_t*) load_; PoolHintPun php; @@ -1758,81 +1758,72 @@ Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token) php.phd.setIndex(token); *load = php.raw; } -// patchConstantPoolLoad takes the address of the instruction that wants to be patched, and -//the address of the start of the constant pool, and figures things out from there. + +// patchConstantPoolLoad takes the address of the instruction that wants to be +// patched, and the address of the start of the constant pool, and figures +// things out from there. bool -Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr) +Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) { PoolHintData data = *(PoolHintData*)loadAddr; uint32_t *instAddr = (uint32_t*) loadAddr; int offset = (char *)constPoolAddr - (char *)loadAddr; switch(data.getLoadType()) { - case PoolHintData::poolBOGUS: + case PoolHintData::PoolBOGUS: MOZ_ASSUME_UNREACHABLE("bogus load type!"); - case PoolHintData::poolDTR: - dummy->as_dtr(IsLoad, 32, Offset, data.getReg(), + case PoolHintData::PoolDTR: + Dummy->as_dtr(IsLoad, 32, Offset, data.getReg(), DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), data.getCond(), instAddr); break; - case PoolHintData::poolBranch: - // Either this used to be a poolBranch, and the label was already bound, so it was - // replaced with a real branch, or this may happen in the future. - // If this is going to happen in the future, then the actual bits that are written here - // don't matter (except the condition code, since that is always preserved across - // patchings) but if it does not get bound later, - // then we want to make sure this is a load from the pool entry (and the pool entry - // should be nullptr so it will crash). + case PoolHintData::PoolBranch: + // Either this used to be a poolBranch, and the label was already bound, + // so it was replaced with a real branch, or this may happen in the + // future. 
If this is going to happen in the future, then the actual + // bits that are written here don't matter (except the condition code, + // since that is always preserved across patchings) but if it does not + // get bound later, then we want to make sure this is a load from the + // pool entry (and the pool entry should be nullptr so it will crash). if (data.isValidPoolHint()) { - dummy->as_dtr(IsLoad, 32, Offset, pc, + Dummy->as_dtr(IsLoad, 32, Offset, pc, DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), data.getCond(), instAddr); } break; - case PoolHintData::poolVDTR: { + case PoolHintData::PoolVDTR: { VFPRegister dest = data.getVFPReg(); int32_t imm = offset + (8 * data.getIndex()) - 8; if (imm < -1023 || imm > 1023) return false; - dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr); + Dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr); break; } } return true; } -uint32_t -Assembler::placeConstantPoolBarrier(int offset) -{ - // BUG: 700526 - // this is still an active path, however, we do not hit it in the test - // suite at all. - MOZ_ASSUME_UNREACHABLE("ARMAssembler holdover"); -} - // Control flow stuff: -// bx can *only* branch to a register -// never to an immediate. +// bx can *only* branch to a register, never to an immediate. BufferOffset Assembler::as_bx(Register r, Condition c, bool isPatchable) { - BufferOffset ret = writeInst(((int) c) | op_bx | r.code()); + BufferOffset ret = writeInst(((int) c) | OpBx | r.code()); if (c == Always && !isPatchable) m_buffer.markGuard(); return ret; } void -Assembler::writePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool) +Assembler::WritePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool) { BOffImm off = afterPool.diffB(branch); *dest = InstBImm(off, Always); } // Branch can branch to an immediate *or* to a register. -// Branches to immediates are pc relative, branches to registers -// are absolute +// Branches to immediates are pc relative, branches to registers are absolute. BufferOffset Assembler::as_b(BOffImm off, Condition c, bool isPatchable) { - BufferOffset ret = writeBranchInst(((int)c) | op_b | off.encode()); + BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode()); if (c == Always && !isPatchable) m_buffer.markGuard(); return ret; @@ -1859,7 +1850,7 @@ Assembler::as_b(Label *l, Condition c, bool isPatchable) old = l->offset(); // This will currently throw an assertion if we couldn't actually // encode the offset of the branch. - if (!BOffImm::isInRange(old)) { + if (!BOffImm::IsInRange(old)) { m_buffer.fail_bail(); return ret; } @@ -1881,14 +1872,14 @@ Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) } // blx can go to either an immediate or a register. -// When blx'ing to a register, we change processor state -// depending on the low bit of the register -// when blx'ing to an immediate, we *always* change processor state. +// When blx'ing to a register, we change processor state depending on the low +// bit of the register; when blx'ing to an immediate, we *always* change +// processor state.
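The immediate branches above all funnel a BOffImm through writeBranchInst, and the 24-bit field BOffImm produces is a signed word offset biased by ARM's pc+8 prefetch. A minimal stand-alone sketch of that arithmetic (the helper names are illustrative, not part of this patch):

    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeBranchOffset(int32_t byteOffset) {
        assert((byteOffset & 0x3) == 0);           // Branch targets are word aligned.
        return ((byteOffset - 8) >> 2) & 0x00ffffff;
    }

    static int32_t DecodeBranchOffset(uint32_t inst) {
        int32_t imm24 = (int32_t)(inst << 8) >> 8; // Sign-extend the low 24 bits.
        return imm24 * 4 + 8;                      // Undo the shift and the bias.
    }

DecodeBranchOffset(EncodeBranchOffset(x)) == x for any aligned x inside the roughly +/-32MB window that BOffImm::IsInRange accepts.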
BufferOffset Assembler::as_blx(Register r, Condition c) { - return writeInst(((int) c) | op_blx | r.code()); + return writeInst(((int) c) | OpBlx | r.code()); } // bl can only branch to an pc-relative immediate offset @@ -1896,7 +1887,7 @@ Assembler::as_blx(Register r, Condition c) BufferOffset Assembler::as_bl(BOffImm off, Condition c) { - return writeBranchInst(((int)c) | op_bl | off.encode()); + return writeBranchInst(((int)c) | OpBl | off.encode()); } BufferOffset @@ -1918,10 +1909,10 @@ Assembler::as_bl(Label *l, Condition c) BufferOffset ret; // See if the list was empty :( if (l->used()) { - // This will currently throw an assertion if we couldn't actually - // encode the offset of the branch. + // This will currently throw an assertion if we couldn't actually encode + // the offset of the branch. old = l->offset(); - if (!BOffImm::isInRange(old)) { + if (!BOffImm::IsInRange(old)) { m_buffer.fail_bail(); return ret; } @@ -1951,22 +1942,23 @@ Assembler::as_mrs(Register r, Condition c) BufferOffset Assembler::as_msr(Register r, Condition c) { - // hardcode the 'mask' field to 0b11 for now. it is bits 18 and 19, which are the two high bits of the 'c' in this constant. + // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which + // are the two high bits of the 'c' in this constant. JS_ASSERT((r.code() & ~0xf) == 0); return writeInst(0x012cf000 | int(c) | r.code()); } // VFP instructions! enum vfp_tags { - vfp_tag = 0x0C000A00, - vfp_arith = 0x02000000 + VfpTag = 0x0C000A00, + VfpArith = 0x02000000 }; BufferOffset Assembler::writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest) { JS_ASSERT((sz & blob) == 0); - JS_ASSERT((vfp_tag & blob) == 0); - return writeInst(vfp_tag | sz | blob, dest); + JS_ASSERT((VfpTag & blob) == 0); + return writeInst(VfpTag | sz | blob, dest); } // Unityped variants: all registers hold the same (ieee754 single/double) @@ -1975,39 +1967,39 @@ BufferOffset Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm, VFPOp op, Condition c) { - // Make sure we believe that all of our operands are the same kind + // Make sure we believe that all of our operands are the same kind. JS_ASSERT_IF(!vn.isMissing(), vd.equiv(vn)); JS_ASSERT_IF(!vm.isMissing(), vd.equiv(vm)); - vfp_size sz = vd.isDouble() ? isDouble : isSingle; - return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | vfp_arith | c); + vfp_size sz = vd.isDouble() ? 
IsDouble : IsSingle; + return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c); } BufferOffset Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c) { - return as_vfp_float(vd, vn, vm, opv_add, c); + return as_vfp_float(vd, vn, vm, OpvAdd, c); } BufferOffset Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c) { - return as_vfp_float(vd, vn, vm, opv_div, c); + return as_vfp_float(vd, vn, vm, OpvDiv, c); } BufferOffset Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c) { - return as_vfp_float(vd, vn, vm, opv_mul, c); + return as_vfp_float(vd, vn, vm, OpvMul, c); } BufferOffset Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c) { - return as_vfp_float(vd, vn, vm, opv_mul, c); + return as_vfp_float(vd, vn, vm, OpvMul, c); MOZ_ASSUME_UNREACHABLE("Feature NYI"); } @@ -2029,70 +2021,70 @@ Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, BufferOffset Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c) { - return as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c); + return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c); } BufferOffset Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c) { - return as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c); + return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c); } BufferOffset Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c) { - return as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c); + return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c); } BufferOffset Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c) { - return as_vfp_float(vd, vn, vm, opv_sub, c); + return as_vfp_float(vd, vn, vm, OpvSub, c); } BufferOffset Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c) { - return as_vfp_float(vd, NoVFPRegister, vm, opv_cmp, c); + return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c); } BufferOffset Assembler::as_vcmpz(VFPRegister vd, Condition c) { - return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c); + return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c); } // Specifically, a move between two same sized-registers. BufferOffset Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) { - return as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c); + return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c); } -//xfer between Core and VFP +// Transfer between Core and VFP. -// Unlike the next function, moving between the core registers and vfp -// registers can't be *that* properly typed. Namely, since I don't want to -// munge the type VFPRegister to also include core registers. Thus, the core -// and vfp registers are passed in based on their type, and src/dest is -// determined by the float2core. +// Unlike the next function, moving between the core registers and vfp registers +// can't be *that* properly typed. Namely, since I don't want to munge the type +// VFPRegister to also include core registers. Thus, the core and vfp registers +// are passed in based on their type, and src/dest is determined by the +// float2core. 
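To make the field composition in writeVFPInst concrete before moving into the core/VFP transfer code, here is a stand-alone check with the relevant constants mirrored from this file (the Always value is the standard ARM AL condition); the disjointness asserts are exactly the invariants writeVFPInst enforces:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t Always   = 0xe0000000; // Condition AL.
        const uint32_t VfpTag   = 0x0C000A00;
        const uint32_t IsDouble = 1 << 8;
        const uint32_t VfpArith = 0x02000000;
        const uint32_t OpvAdd   = 0x3 << 20;

        uint32_t blob = OpvAdd | VfpArith;    // Register fields (VD/VN/VM) omitted here.
        assert((IsDouble & blob) == 0);       // The size bit must be free in the blob.
        assert((VfpTag & blob) == 0);         // The tag bits must be free in the blob.
        uint32_t inst = Always | VfpTag | IsDouble | blob;
        assert(inst == 0xee300b00);           // vadd.f64 d0, d0, d0
        return 0;
    }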
BufferOffset Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c, Condition c, int idx) { - vfp_size sz = isSingle; + vfp_size sz = IsSingle; if (vm.isDouble()) { // Technically, this can be done with a vmov à la ARM ARM under vmov - // however, that requires at least an extra bit saying if the - // operation should be performed on the lower or upper half of the - // double. Moving a single to/from 2N/2N+1 isn't equivalent, - // since there are 32 single registers, and 32 double registers - // so there is no way to encode the last 16 double registers. - sz = isDouble; + // however, that requires at least an extra bit saying if the operation + // should be performed on the lower or upper half of the double. Moving + // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single + // registers, and 32 double registers so there is no way to encode the + // last 16 double registers. + sz = IsDouble; JS_ASSERT(idx == 0 || idx == 1); - // If we are transferring a single half of the double - // then it must be moving a VFP reg to a core reg. + // If we are transferring a single half of the double then it must be + // moving a VFP reg to a core reg. if (vt2 == InvalidReg) JS_ASSERT(f2c == FloatToCore); idx = idx << 21; @@ -2110,52 +2102,51 @@ Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c } } enum vcvt_destFloatness { - toInteger = 1 << 18, - toFloat = 0 << 18 + VcvtToInteger = 1 << 18, + VcvtToFloat = 0 << 18 }; enum vcvt_toZero { - toZero = 1 << 7, // use the default rounding mode, which rounds truncates - toFPSCR = 0 << 7 // use whatever rounding mode the fpscr specifies + VcvtToZero = 1 << 7, // Use the round-to-zero mode, which truncates. + VcvtToFPSCR = 0 << 7 // Use whatever rounding mode the fpscr specifies. }; enum vcvt_Signedness { - toSigned = 1 << 16, - toUnsigned = 0 << 16, - fromSigned = 1 << 7, - fromUnsigned = 0 << 7 + VcvtToSigned = 1 << 16, + VcvtToUnsigned = 0 << 16, + VcvtFromSigned = 1 << 7, + VcvtFromUnsigned = 0 << 7 }; -// our encoding actually allows just the src and the dest (and their types) -// to uniquely specify the encoding that we are going to use. +// Our encoding actually allows just the src and the dest (and their types) to +// uniquely specify the encoding that we are going to use. BufferOffset Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR, Condition c) { - // Unlike other cases, the source and dest types cannot be the same + // Unlike other cases, the source and dest types cannot be the same. JS_ASSERT(!vd.equiv(vm)); - vfp_size sz = isDouble; + vfp_size sz = IsDouble; if (vd.isFloat() && vm.isFloat()) { - // Doing a float -> float conversion + // Doing a float -> float conversion. if (vm.isSingle()) - sz = isSingle; - return writeVFPInst(sz, c | 0x02B700C0 | - VM(vm) | VD(vd)); + sz = IsSingle; + return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd)); } // At least one of the registers should be a float. vcvt_destFloatness destFloat; vcvt_Signedness opSign; - vcvt_toZero doToZero = toFPSCR; + vcvt_toZero doToZero = VcvtToFPSCR; JS_ASSERT(vd.isFloat() || vm.isFloat()); if (vd.isSingle() || vm.isSingle()) { - sz = isSingle; + sz = IsSingle; } if (vd.isFloat()) { - destFloat = toFloat; - opSign = (vm.isSInt()) ? fromSigned : fromUnsigned; + destFloat = VcvtToFloat; + opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned; } else { - destFloat = toInteger; - opSign = (vd.isSInt()) ? toSigned : toUnsigned; - doToZero = useFPSCR ?
toFPSCR : toZero; + destFloat = VcvtToInteger; + opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned; + doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero; } return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero); } @@ -2165,7 +2156,7 @@ Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool { JS_ASSERT(vd.isFloat()); uint32_t sx = 0x1; - vfp_size sf = vd.isDouble() ? isDouble : isSingle; + vfp_size sf = vd.isDouble() ? IsDouble : IsSingle; int32_t imm5 = fixedPoint; imm5 = (sx ? 32 : 16) - imm5; JS_ASSERT(imm5 >= 0); @@ -2174,31 +2165,30 @@ Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool (!isSigned) << 16 | imm5 | c); } -// xfer between VFP and memory +// Transfer between VFP and memory. BufferOffset Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr, - Condition c /* vfp doesn't have a wb option*/, + Condition c /* vfp doesn't have a wb option */, uint32_t *dest) { - vfp_size sz = vd.isDouble() ? isDouble : isSingle; + vfp_size sz = vd.isDouble() ? IsDouble : IsSingle; return writeVFPInst(sz, ls | 0x01000000 | addr.encode() | VD(vd) | c, dest); } -// VFP's ldm/stm work differently from the standard arm ones. -// You can only transfer a range +// VFP's ldm/stm work differently from the standard arm ones. You can only +// transfer a range. BufferOffset Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length, - /*also has update conditions*/Condition c) + /* also has update conditions */ Condition c) { JS_ASSERT(length <= 16 && length >= 0); - vfp_size sz = vd.isDouble() ? isDouble : isSingle; + vfp_size sz = vd.isDouble() ? IsDouble : IsSingle; if (vd.isDouble()) length *= 2; - return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | - length | + return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length | dtmMode | dtmUpdate | dtmCond); } @@ -2206,7 +2196,7 @@ BufferOffset Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c) { JS_ASSERT(imm.isValid()); - vfp_size sz = vd.isDouble() ? isDouble : isSingle; + vfp_size sz = vd.isDouble() ? IsDouble : IsSingle; return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000); } @@ -2233,9 +2223,8 @@ Assembler::nextLink(BufferOffset b, BufferOffset *next) if (destOff.isInvalid()) return false; - // Propagate the next link back to the caller, by - // constructing a new BufferOffset into the space they - // provided. + // Propagate the next link back to the caller, by constructing a new + // BufferOffset into the space they provided. new (next) BufferOffset(destOff.decode()); return true; } @@ -2245,8 +2234,8 @@ Assembler::bind(Label *label, BufferOffset boff) { if (label->used()) { bool more; - // If our caller didn't give us an explicit target to bind to - // then we want to bind to the location of the next instruction + // If our caller didn't give us an explicit target to bind to then we + // want to bind to the location of the next instruction. BufferOffset dest = boff.assigned() ? boff : nextOffset(); BufferOffset b(label); do { @@ -2272,8 +2261,8 @@ Assembler::bind(RepatchLabel *label) { BufferOffset dest = nextOffset(); if (label->used()) { - // If the label has a use, then change this use to refer to - // the bound label; + // If the label has a use, then change this use to refer to the bound + // label. BufferOffset branchOff(label->offset()); // Since this was created with a RepatchLabel, the value written in the // instruction stream is not branch shaped, it is PoolHintData shaped. 
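The Label chains that bind() walks above thread the buffer offset of the next unbound use through the not-yet-patched instructions themselves. A reduced stand-alone model of that traversal (the vector stands in for the instruction buffer; the names are illustrative):

    #include <cstdint>
    #include <vector>

    int main() {
        std::vector<int32_t> stream(16, 0); // Each use slot holds the offset of the
        int32_t head = -1;                  // previous use, or -1 at the chain's end.
        auto use = [&](int32_t off) { stream[off] = head; head = off; };

        use(2); use(7); use(11);            // Three forward references to one label.

        const int32_t target = 14;          // bind(): rewrite every use to the target.
        for (int32_t off = head; off != -1; ) {
            int32_t next = stream[off];
            stream[off] = target;           // The real assembler re-emits a branch here.
            off = next;
        }
        return 0;
    }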
@@ -2306,8 +2295,8 @@ Assembler::retarget(Label *label, Label *target) while (nextLink(labelBranchOffset, &next)) labelBranchOffset = next; - // Then patch the head of label's use chain to the tail of - // target's use chain, prepending the entire use chain of target. + // Then patch the head of label's use chain to the tail of target's + // use chain, prepending the entire use chain of target. Instruction branch = *editSrc(labelBranchOffset); Condition c; branch.extractCond(&c); @@ -2319,7 +2308,7 @@ Assembler::retarget(Label *label, Label *target) else MOZ_ASSUME_UNREACHABLE("crazy fixup!"); } else { - // The target is unbound and unused. We can just take the head of + // The target is unbound and unused. We can just take the head of // the list hanging off of label, and dump that into target. DebugOnly prev = target->use(label->offset()); JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET); @@ -2335,35 +2324,29 @@ static int stopBKPT = -1; void Assembler::as_bkpt() { - // This is a count of how many times a breakpoint instruction has been generated. - // It is embedded into the instruction for debugging purposes. gdb will print "bkpt xxx" - // when you attempt to dissassemble a breakpoint with the number xxx embedded into it. - // If this breakpoint is being hit, then you can run (in gdb) - // >b dbg_break - // >b main - // >commands - // >set stopBKPT = xxx - // >c - // >end - - // which will set a breakpoint on the function dbg_break above - // set a scripted breakpoint on main that will set the (otherwise unmodified) - // value to the number of the breakpoint, so dbg_break will actuall be called - // and finally, when you run the executable, execution will halt when that - // breakpoint is generated + // This is a count of how many times a breakpoint instruction has been + // generated. It is embedded into the instruction for debugging + // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a + // breakpoint with the number xxx embedded into it. If this breakpoint is + // being hit, then you can run (in gdb): + // >b dbg_break + // >b main + // >commands + // >set stopBKPT = xxx + // >c + // >end + // which will set a breakpoint on the function dbg_break above, set a + // scripted breakpoint on main that will set the (otherwise unmodified) + // value to the number of the breakpoint (so dbg_break will actually be + // called), and finally, when you run the executable, execution will halt + // when that breakpoint is generated. static int hit = 0; if (stopBKPT == hit) dbg_break(); - writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0)<<4)); + writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4)); hit++; } -void -Assembler::dumpPool() -{ - m_buffer.flushPool(); -} - void Assembler::flushBuffer() { @@ -2394,15 +2377,15 @@ Assembler::getBranchOffset(const Instruction *i_) return dest.decode(); } void -Assembler::retargetNearBranch(Instruction *i, int offset, bool final) +Assembler::RetargetNearBranch(Instruction *i, int offset, bool final) { Assembler::Condition c; i->extractCond(&c); - retargetNearBranch(i, offset, c, final); + RetargetNearBranch(i, offset, c, final); } void -Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final) +Assembler::RetargetNearBranch(Instruction *i, int offset, Condition cond, bool final) { // Retargeting calls is totally unsupported!
JS_ASSERT_IF(i->is(), i->is() || i->is()); @@ -2411,13 +2394,13 @@ Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool f else new (i) InstBImm(BOffImm(offset), cond); - // Flush the cache, since an instruction was overwritten + // Flush the cache, since an instruction was overwritten. if (final) AutoFlushICache::flush(uintptr_t(i), 4); } void -Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond) +Assembler::RetargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond) { int32_t offset = reinterpret_cast(slot) - reinterpret_cast(i); if (!i->is()) { @@ -2431,8 +2414,8 @@ Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Cond struct PoolHeader : Instruction { struct Header { - // size should take into account the pool header. - // size is in units of Instruction (4bytes), not byte + // The size should take into account the pool header. + // The size is in units of Instruction (4 bytes), not bytes. uint32_t size : 15; bool isNatural : 1; uint32_t ONES : 16; @@ -2469,11 +2452,11 @@ struct PoolHeader : Instruction { Header tmp(this); return tmp.isNatural; } - static bool isTHIS(const Instruction &i) { + static bool IsTHIS(const Instruction &i) { return (*i.raw() & 0xffff0000) == 0xffff0000; } - static const PoolHeader *asTHIS(const Instruction &i) { - if (!isTHIS(i)) + static const PoolHeader *AsTHIS(const Instruction &i) { + if (!IsTHIS(i)) return nullptr; return static_cast<const PoolHeader *>(&i); } @@ -2481,11 +2464,11 @@ struct PoolHeader : Instruction { void -Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural) +Assembler::WritePoolHeader(uint8_t *start, Pool *p, bool isNatural) { STATIC_ASSERT(sizeof(PoolHeader) == 4); uint8_t *pool = start+4; - // go through the usual rigaramarole to get the size of the pool. + // Go through the usual rigmarole to get the size of the pool. pool = p[0].addPoolSize(pool); pool = p[1].addPoolSize(pool); pool = p[1].other->addPoolSize(pool); @@ -2500,44 +2483,42 @@ void -Assembler::writePoolFooter(uint8_t *start, Pool *p, bool isNatural) +Assembler::WritePoolFooter(uint8_t *start, Pool *p, bool isNatural) { return; } -// The size of an arbitrary 32-bit call in the instruction stream. -// On ARM this sequence is |pc = ldr pc - 4; imm32| given that we -// never reach the imm32. +// The size of an arbitrary 32-bit call in the instruction stream. On ARM this +// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32. uint32_t -Assembler::patchWrite_NearCallSize() +Assembler::PatchWrite_NearCallSize() { return sizeof(uint32_t); } void -Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) +Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) { Instruction *inst = (Instruction *) start.raw(); - // Overwrite whatever instruction used to be here with a call. - // Since the destination is in the same function, it will be within range of the 24<<2 byte - // bl instruction. + // Overwrite whatever instruction used to be here with a call. Since the + // destination is in the same function, it will be within range of the + // 24 << 2 byte bl instruction. uint8_t *dest = toCall.raw(); new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always); // Ensure everyone sees the code that was just written into memory.
- AutoFlushICache::flush(uintptr_t(inst), 4); } void -Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, PatchedImmPtr expectedValue) { Instruction *ptr = (Instruction *) label.raw(); InstructionIterator iter(ptr); Register dest; Assembler::RelocStyle rs; - DebugOnly val = getPtr32Target(&iter, &dest, &rs); + DebugOnly val = GetPtr32Target(&iter, &dest, &rs); JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value)); - reinterpret_cast(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)), + reinterpret_cast(Dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)), dest, Always, rs, ptr); // L_LDR won't cause any instructions to be updated. if (rs != L_LDR) { @@ -2547,28 +2528,28 @@ Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal } void -Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue) +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue) { - patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value)); + PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value)); } // This just stomps over memory with 32 bits of raw data. Its purpose is to // overwrite the call of JITed code with 32 bits worth of an offset. This will -// is only meant to function on code that has been invalidated, so it should -// be totally safe. Since that instruction will never be executed again, a -// ICache flush should not be necessary +// only be used on code that has been invalidated, so it should be +// totally safe. Since that instruction will never be executed again, an ICache +// flush should not be necessary. void -Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) { +Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) { // Raw is going to be the return address. uint32_t *raw = (uint32_t*)label.raw(); - // Overwrite the 4 bytes before the return address, which will - // end up being the call instruction. - *(raw-1) = imm.value; + // Overwrite the 4 bytes before the return address, which will end up being + // the call instruction. + *(raw - 1) = imm.value; } uint8_t * -Assembler::nextInstruction(uint8_t *inst_, uint32_t *count) +Assembler::NextInstruction(uint8_t *inst_, uint32_t *count) { Instruction *inst = reinterpret_cast(inst_); if (count != nullptr) @@ -2586,16 +2567,17 @@ InstIsGuard(Instruction *inst, const PoolHeader **ph) if (!(inst->is() || inst->is())) return false; // See if the next instruction is a pool header. - *ph = (inst+1)->as(); + *ph = (inst + 1)->as(); return *ph != nullptr; } static bool InstIsBNop(Instruction *inst) { - // In some special situations, it is necessary to insert a NOP - // into the instruction stream that nobody knows about, since nobody should know about - // it, make sure it gets skipped when Instruction::next() is called. - // this generates a very specific nop, namely a branch to the next instruction. + // In some special situations, it is necessary to insert a NOP into the + // instruction stream that nobody knows about. Since nobody should know + // about it, make sure it gets skipped when Instruction::next() is called. + // This generates a very specific nop, namely a branch to the next + // instruction.
Assembler::Condition c; inst->extractCond(&c); if (c != Assembler::Always) @@ -2621,9 +2603,8 @@ Instruction * Instruction::skipPool() { const PoolHeader *ph; - // If this is a guard, and the next instruction is a header, - // always work around the pool. If it isn't a guard, then start - // looking ahead. + // If this is a guard, and the next instruction is a header, always work + // around the pool. If it isn't a guard, then start looking ahead. if (InstIsGuard(this, &ph)) { // Don't skip a natural guard. if (ph->isNatural()) @@ -2671,8 +2652,8 @@ Instruction::next() { Instruction *ret = this+1; const PoolHeader *ph; - // If this is a guard, and the next instruction is a header, always work around the pool - // If it isn't a guard, then start looking ahead. + // If this is a guard, and the next instruction is a header, always work + // around the pool. If it isn't a guard, then start looking ahead. if (InstIsGuard(this, &ph)) return (ret + ph->size())->skipPool(); if (InstIsArtificialGuard(ret, &ph)) @@ -2726,9 +2707,8 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) JS_ASSERT(inst->is() || inst->is()); if (inst->is()) { - // If it looks like the start of a movw/movt sequence, - // then make sure we have all of it (and advance the iterator - // past the full sequence) + // If it looks like the start of a movw/movt sequence, then make sure we + // have all of it (and advance the iterator past the full sequence). inst = inst->next(); JS_ASSERT(inst->is()); } @@ -2758,9 +2738,8 @@ Assembler::ToggledCallSize(uint8_t *code) JS_ASSERT(inst->is() || inst->is()); if (inst->is()) { - // If it looks like the start of a movw/movt sequence, - // then make sure we have all of it (and advance the iterator - // past the full sequence) + // If it looks like the start of a movw/movt sequence, then make sure we + // have all of it (and advance the iterator past the full sequence). inst = inst->next(); JS_ASSERT(inst->is()); } @@ -2780,7 +2759,7 @@ Assembler::BailoutTableStart(uint8_t *code) return (uint8_t *) inst; } -void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst) +void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction *inst) { JS_ASSERT(inst->is()); InstCMP *cmp = inst->as(); @@ -2794,9 +2773,10 @@ void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst) Imm8 imm8 = Imm8(heapSize); JS_ASSERT(!imm8.invalid); - *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always); - // NOTE: we don't update the Auto Flush Cache! this function is currently only called from - // within AsmJSModule::patchHeapAccesses, which does that for us. Don't call this! + *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCond, Always); + // NOTE: we don't update the Auto Flush Cache! this function is currently + // only called from within AsmJSModule::patchHeapAccesses, which does that + // for us. Don't call this! } InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) @@ -2804,7 +2784,7 @@ InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) // Work around pools with an artificial pool guard and around nop-fill. i = i->skipPool(); } -Assembler *Assembler::dummy = nullptr; +Assembler *Assembler::Dummy = nullptr; uint32_t Assembler::NopFill = 0; diff --git a/js/src/jit/arm/Assembler-arm.h b/js/src/jit/arm/Assembler-arm.h index fbca13472f69..b9b08a681432 100644 --- a/js/src/jit/arm/Assembler-arm.h +++ b/js/src/jit/arm/Assembler-arm.h @@ -20,12 +20,10 @@ namespace js { namespace jit { -//NOTE: there are duplicates in this list! 
-// sometimes we want to specifically refer to the -// link register as a link register (bl lr is much -// clearer than bl r14). HOWEVER, this register can -// easily be a gpr when it is not busy holding the return -// address. +// NOTE: there are duplicates in this list! Sometimes we want to specifically +// refer to the link register as a link register (bl lr is much clearer than bl +// r14). HOWEVER, this register can easily be a gpr when it is not busy holding +// the return address. static MOZ_CONSTEXPR_VAR Register r0 = { Registers::r0 }; static MOZ_CONSTEXPR_VAR Register r1 = { Registers::r1 }; static MOZ_CONSTEXPR_VAR Register r2 = { Registers::r2 }; @@ -131,11 +129,10 @@ static MOZ_CONSTEXPR_VAR FloatRegister d13(FloatRegisters::d13); static MOZ_CONSTEXPR_VAR FloatRegister d14(FloatRegisters::d14); static MOZ_CONSTEXPR_VAR FloatRegister d15(FloatRegisters::d15); -// For maximal awesomeness, 8 should be sufficent. -// ldrd/strd (dual-register load/store) operate in a single cycle -// when the address they are dealing with is 8 byte aligned. -// Also, the ARM abi wants the stack to be 8 byte aligned at -// function boundaries. I'm trying to make sure this is always true. +// For maximal awesomeness, 8 should be sufficient. ldrd/strd (dual-register +// load/store) operate in a single cycle when the address they are dealing with +// is 8 byte aligned. Also, the ARM ABI wants the stack to be 8 byte aligned at +// function boundaries. I'm trying to make sure this is always true. static const uint32_t StackAlignment = 8; static const uint32_t CodeAlignment = 8; static const bool StackKeptAligned = true; @@ -164,8 +161,8 @@ uint32_t VD(VFPRegister vr); uint32_t VN(VFPRegister vr); uint32_t VM(VFPRegister vr); -// For being passed into the generic vfp instruction generator when -// there is an instruction that only takes two registers +// For being passed into the generic vfp instruction generator when there is an +// instruction that only takes two registers. static MOZ_CONSTEXPR_VAR VFPRegister NoVFPRegister(VFPRegister::Double, 0, false, true); struct ImmTag : public Imm32 @@ -184,10 +181,10 @@ struct ImmType : public ImmTag enum Index { Offset = 0 << 21 | 1<<24, - PreIndex = 1<<21 | 1 << 24, + PreIndex = 1 << 21 | 1 << 24, PostIndex = 0 << 21 | 0 << 24 - // The docs were rather unclear on this. it sounds like - // 1<<21 | 0 << 24 encodes dtrt + // The docs were rather unclear on this. It sounds like + // 1 << 21 | 0 << 24 encodes dtrt. }; // Seriously, wtf arm @@ -199,7 +196,7 @@ enum IsImmDTR_ { IsImmDTR = 0 << 25, IsNotImmDTR = 1 << 25 }; -// For the extra memory operations, ldrd, ldrsb, ldrh +// For the extra memory operations, ldrd, ldrsb, ldrh. enum IsImmEDTR_ { IsImmEDTR = 1 << 22, IsNotImmEDTR = 0 << 22 @@ -214,8 +211,8 @@ enum ShiftType { RRX = ROR // RRX is encoded as ROR with a 0 offset. }; -// The actual codes that get set by instructions -// and the codes that are checked by the conditions below. +// The actual codes that get set by instructions and the codes that are checked +// by the conditions below. struct ConditionCodes { bool Zero : 1; @@ -224,9 +221,7 @@ struct ConditionCodes bool Minus : 1; }; -// Modes for STM/LDM. -// Names are the suffixes applied to -// the instruction. +// Modes for STM/LDM. Names are the suffixes applied to the instruction. enum DTMMode { A = 0 << 24, // empty / after B = 1 << 24, // full / before @@ -251,108 +246,107 @@ enum LoadStore { IsLoad = 1 << 20, IsStore = 0 << 20 }; -// You almost never want to use this directly.
-// Instead, you wantto pass in a -// signed constant, -// and let this bit be implicitly set for you. -// this is however, necessary if we want a negative index +// You almost never want to use this directly. Instead, you want to pass in a +// signed constant, and let this bit be implicitly set for you. This is, however, +// necessary if we want a negative index. enum IsUp_ { IsUp = 1 << 23, IsDown = 0 << 23 }; enum ALUOp { - op_mov = 0xd << 21, - op_mvn = 0xf << 21, - op_and = 0x0 << 21, - op_bic = 0xe << 21, - op_eor = 0x1 << 21, - op_orr = 0xc << 21, - op_adc = 0x5 << 21, - op_add = 0x4 << 21, - op_sbc = 0x6 << 21, - op_sub = 0x2 << 21, - op_rsb = 0x3 << 21, - op_rsc = 0x7 << 21, - op_cmn = 0xb << 21, - op_cmp = 0xa << 21, - op_teq = 0x9 << 21, - op_tst = 0x8 << 21, - op_invalid = -1 + OpMov = 0xd << 21, + OpMvn = 0xf << 21, + OpAnd = 0x0 << 21, + OpBic = 0xe << 21, + OpEor = 0x1 << 21, + OpOrr = 0xc << 21, + OpAdc = 0x5 << 21, + OpAdd = 0x4 << 21, + OpSbc = 0x6 << 21, + OpSub = 0x2 << 21, + OpRsb = 0x3 << 21, + OpRsc = 0x7 << 21, + OpCmn = 0xb << 21, + OpCmp = 0xa << 21, + OpTeq = 0x9 << 21, + OpTst = 0x8 << 21, + OpInvalid = -1 }; enum MULOp { - opm_mul = 0 << 21, - opm_mla = 1 << 21, - opm_umaal = 2 << 21, - opm_mls = 3 << 21, - opm_umull = 4 << 21, - opm_umlal = 5 << 21, - opm_smull = 6 << 21, - opm_smlal = 7 << 21 + OpmMul = 0 << 21, + OpmMla = 1 << 21, + OpmUmaal = 2 << 21, + OpmMls = 3 << 21, + OpmUmull = 4 << 21, + OpmUmlal = 5 << 21, + OpmSmull = 6 << 21, + OpmSmlal = 7 << 21 }; enum BranchTag { - op_b = 0x0a000000, - op_b_mask = 0x0f000000, - op_b_dest_mask = 0x00ffffff, - op_bl = 0x0b000000, - op_blx = 0x012fff30, - op_bx = 0x012fff10 + OpB = 0x0a000000, + OpBMask = 0x0f000000, + OpBDestMask = 0x00ffffff, + OpBl = 0x0b000000, + OpBlx = 0x012fff30, + OpBx = 0x012fff10 }; // Just like ALUOp, but for the vfp instruction set. enum VFPOp { - opv_mul = 0x2 << 20, - opv_add = 0x3 << 20, - opv_sub = 0x3 << 20 | 0x1 << 6, - opv_div = 0x8 << 20, - opv_mov = 0xB << 20 | 0x1 << 6, - opv_abs = 0xB << 20 | 0x3 << 6, - opv_neg = 0xB << 20 | 0x1 << 6 | 0x1 << 16, - opv_sqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16, - opv_cmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16, - opv_cmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16 + OpvMul = 0x2 << 20, + OpvAdd = 0x3 << 20, + OpvSub = 0x3 << 20 | 0x1 << 6, + OpvDiv = 0x8 << 20, + OpvMov = 0xB << 20 | 0x1 << 6, + OpvAbs = 0xB << 20 | 0x3 << 6, + OpvNeg = 0xB << 20 | 0x1 << 6 | 0x1 << 16, + OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16, + OpvCmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16, + OpvCmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16 }; // Negate the operation, AND negate the immediate that we were passed in. ALUOp ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest); bool can_dbl(ALUOp op); bool condsAreSafe(ALUOp op); -// If there is a variant of op that has a dest (think cmp/sub) -// return that variant of it. +// If there is a variant of op that has a dest (think cmp/sub) return that +// variant of it. ALUOp getDestVariant(ALUOp op); static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data); static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0); // All of these classes exist solely to shuffle data into the various operands. -// For example Operand2 can be an imm8, a register-shifted-by-a-constant or -// a register-shifted-by-a-register. I represent this in C++ by having a -// base class Operand2, which just stores the 32 bits of data as they will be -// encoded in the instruction.
You cannot directly create an Operand2 -// since it is tricky, and not entirely sane to do so. Instead, you create -// one of its child classes, e.g. Imm8. Imm8's constructor takes a single -// integer argument. Imm8 will verify that its argument can be encoded -// as an ARM 12 bit imm8, encode it using an Imm8data, and finally call -// its parent's (Operand2) constructor with the Imm8data. The Operand2 -// constructor will then call the Imm8data's encode() function to extract -// the raw bits from it. In the future, we should be able to extract -// data from the Operand2 by asking it for its component Imm8data -// structures. The reason this is so horribly round-about is I wanted -// to have Imm8 and RegisterShiftedRegister inherit directly from Operand2 -// but have all of them take up only a single word of storage. -// I also wanted to avoid passing around raw integers at all -// since they are error prone. +// For example Operand2 can be an imm8, a register-shifted-by-a-constant or a +// register-shifted-by-a-register. We represent this in C++ by having a base +// class Operand2, which just stores the 32 bits of data as they will be encoded +// in the instruction. You cannot directly create an Operand2 since it is +// tricky, and not entirely sane to do so. Instead, you create one of its child +// classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8 +// will verify that its argument can be encoded as an ARM 12 bit imm8, encode it +// using an Imm8data, and finally call its parent's (Operand2) constructor with +// the Imm8data. The Operand2 constructor will then call the Imm8data's encode() +// function to extract the raw bits from it. +// +// In the future, we should be able to extract data from the Operand2 by asking +// it for its component Imm8data structures. The reason this is so horribly +// round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit +// directly from Operand2 but have all of them take up only a single word of +// storage. We also wanted to avoid passing around raw integers at all since +// they are error prone. class Op2Reg; class O2RegImmShift; class O2RegRegShift; namespace datastore { struct Reg { - // the "second register" + // The "second register". uint32_t RM : 4; - // do we get another register for shifting + // Do we get another register for shifting. uint32_t RRS : 1; ShiftType Type : 2; - // I'd like this to be a more sensible encoding, but that would - // need to be a struct and that would not pack :( + // We'd like this to be a more sensible encoding, but that would need to be + // a struct and that would not pack :( uint32_t ShiftAmount : 5; uint32_t pad : 20; @@ -368,18 +362,17 @@ struct Reg } }; -// Op2 has a mode labelled "", which is arm's magical -// immediate encoding. Some instructions actually get 8 bits of -// data, which is called Imm8Data below. These should have edit -// distance > 1, but this is how it is for now. +// Op2 has a mode labelled "", which is arm's magical immediate encoding. +// Some instructions actually get 8 bits of data, which is called Imm8Data +// below. These should have edit distance > 1, but this is how it is for now. struct Imm8mData { private: uint32_t data : 8; uint32_t rot : 4; - // Throw in an extra bit that will be 1 if we can't encode this - // properly. if we can encode it properly, a simple "|" will still - // suffice to meld it into the instruction. + // Throw in an extra bit that will be 1 if we can't encode this properly. 
+ // If we can encode it properly, a simple "|" will still suffice to meld it + // into the instruction. uint32_t buff : 19; public: uint32_t invalid : 1; @@ -413,13 +406,12 @@ struct Imm8Data uint32_t encode() { return imm4L | (imm4H << 8); }; - Imm8Data(uint32_t imm) : imm4L(imm&0xf), imm4H(imm>>4) { + Imm8Data(uint32_t imm) : imm4L(imm & 0xf), imm4H(imm >> 4) { JS_ASSERT(imm <= 0xff); } }; -// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted -// by 2. +// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2. struct Imm8VFPOffData { private: @@ -434,8 +426,8 @@ struct Imm8VFPOffData } }; -// ARM can magically encode 256 very special immediates to be moved -// into a register. +// ARM can magically encode 256 very special immediates to be moved into a +// register. struct Imm8VFPImmData { private: @@ -450,7 +442,7 @@ struct Imm8VFPImmData { } Imm8VFPImmData(uint32_t imm) - : imm4L(imm&0xf), imm4H(imm>>4), isInvalid(0) + : imm4L(imm & 0xf), imm4H(imm >> 4), isInvalid(0) { JS_ASSERT(imm <= 0xff); } @@ -495,7 +487,7 @@ struct RIS struct RRS { uint32_t MustZero : 1; - // the register that holds the shift amount + // The register that holds the shift amount. uint32_t RS : 4; RRS(uint32_t rs) @@ -553,14 +545,14 @@ class Operand2 class Imm8 : public Operand2 { public: - static datastore::Imm8mData encodeImm(uint32_t imm) { + static datastore::Imm8mData EncodeImm(uint32_t imm) { // mozilla::CountLeadingZeroes32(imm) requires imm != 0. if (imm == 0) return datastore::Imm8mData(0, 0); int left = mozilla::CountLeadingZeroes32(imm) & 30; // See if imm is a simple value that can be encoded with a rotate of 0. // This is effectively imm <= 0xff, but I assume this can be optimized - // more + // more. if (left >= 24) return datastore::Imm8mData(imm, 0); @@ -568,7 +560,7 @@ class Imm8 : public Operand2 // have 0 yet. int no_imm = imm & ~(0xff << (24 - left)); if (no_imm == 0) { - return datastore::Imm8mData(imm >> (24 - left), ((8+left) >> 1)); + return datastore::Imm8mData(imm >> (24 - left), ((8 + left) >> 1)); } // Look for the most signifigant bit set, once again. int right = 32 - (mozilla::CountLeadingZeroes32(no_imm) & 30); @@ -580,10 +572,10 @@ class Imm8 : public Operand2 // immediate that we were passed in, and see if it fits into 8 bits. unsigned int mask = imm << (8 - right) | imm >> (24 + right); if (mask <= 0xff) - return datastore::Imm8mData(mask, (8-right) >> 1); + return datastore::Imm8mData(mask, (8 - right) >> 1); return datastore::Imm8mData(); } - // pair template? + // Pair template? struct TwoImm8mData { datastore::Imm8mData fst, snd; @@ -597,9 +589,9 @@ class Imm8 : public Operand2 { } }; - static TwoImm8mData encodeTwoImms(uint32_t); + static TwoImm8mData EncodeTwoImms(uint32_t); Imm8(uint32_t imm) - : Operand2(encodeImm(imm)) + : Operand2(EncodeImm(imm)) { } }; @@ -672,11 +664,11 @@ O2RegRegShift lsr (Register r, Register amt); O2RegRegShift asr (Register r, Register amt); O2RegRegShift ror (Register r, Register amt); -// An offset from a register to be used for ldr/str. This should include -// the sign bit, since ARM has "signed-magnitude" offsets. That is it encodes -// an unsigned offset, then the instruction specifies if the offset is positive -// or negative. The +/- bit is necessary if the instruction set wants to be -// able to have a negative register offset e.g. ldr pc, [r1,-r2]; +// An offset from a register to be used for ldr/str. This should include the +// sign bit, since ARM has "signed-magnitude" offsets.
That is, it encodes an +// unsigned offset, then the instruction specifies if the offset is positive or +// negative. The +/- bit is necessary if the instruction set wants to be able to +// have a negative register offset e.g. ldr pc, [r1,-r2]; class DtrOff { uint32_t data; @@ -707,7 +699,7 @@ class DtrOffImm : public DtrOff class DtrOffReg : public DtrOff { // These are designed to be called by a constructor of a subclass. - // Constructing the necessary RIS/RRS structures are annoying + // Constructing the necessary RIS/RRS structures is annoying. protected: DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp) : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu) @@ -734,7 +726,7 @@ class DtrRegRegShift : public DtrOffReg { } }; -// we will frequently want to bundle a register with its offset so that we have +// We will frequently want to bundle a register with its offset so that we have // an "operand" to a load instruction. class DTRAddr { @@ -789,9 +781,8 @@ class EDtrOffImm : public EDtrOff } }; -// this is the most-derived class, since the extended data -// transfer instructions don't support any sort of modifying the -// "index" operand +// This is the most-derived class, since the extended data transfer instructions +// don't support any sort of modifying the "index" operand. class EDtrOffReg : public EDtrOff { public: @@ -863,7 +854,7 @@ class VFPImm { uint32_t data; public: - static const VFPImm one; + static const VFPImm One; VFPImm(uint32_t topWordOfDouble); @@ -875,8 +866,9 @@ class VFPImm { } }; -// A BOffImm is an immediate that is used for branches. Namely, it is the offset that will -// be encoded in the branch instruction. This is the only sane way of constructing a branch. +// A BOffImm is an immediate that is used for branches. Namely, it is the offset +// that will be encoded in the branch instruction. This is the only sane way of +// constructing a branch. class BOffImm { uint32_t data; @@ -893,10 +885,10 @@ class BOffImm : data ((offset - 8) >> 2 & 0x00ffffff) { JS_ASSERT((offset & 0x3) == 0); - if (!isInRange(offset)) + if (!IsInRange(offset)) CrashAtUnhandlableOOM("BOffImm"); } - static bool isInRange(int offset) + static bool IsInRange(int offset) { if ((offset - 8) < -33554432) return false; @@ -943,15 +935,13 @@ class Imm16 } }; -/* I would preffer that these do not exist, since there are essentially -* no instructions that would ever take more than one of these, however, -* the MIR wants to only have one type of arguments to functions, so bugger. -*/ +// I would prefer that these do not exist, since there are essentially no +// instructions that would ever take more than one of these, however, the MIR +// wants to only have one type of arguments to functions, so bugger. class Operand { - // the encoding of registers is the same for OP2, DTR and EDTR - // yet the type system doesn't let us express this, so choices - // must be made. + // The encoding of registers is the same for OP2, DTR and EDTR yet the type + // system doesn't let us express this, so choices must be made.
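Stepping back to Imm8::EncodeImm above: a value fits ARM's magical 8-bit-rotated immediate form iff some even left-rotation of it fits in 8 bits, and EncodeImm finds that rotation with CountLeadingZeroes32. The brute-force formulation below is a handy model to test against (a sketch only, not the algorithm the patch uses):

    #include <cassert>
    #include <cstdint>

    static bool IsEncodableImm8m(uint32_t v) {
        for (unsigned rot = 0; rot < 32; rot += 2) {
            // Rotate v left by an even amount; rot == 0 is special-cased to
            // avoid the undefined v >> 32.
            uint32_t rotated = rot ? (v << rot) | (v >> (32 - rot)) : v;
            if (rotated <= 0xff)
                return true;
        }
        return false;
    }

    int main() {
        assert(IsEncodableImm8m(0xff));        // imm8 = 0xff, rotation 0.
        assert(IsEncodableImm8m(0xff000000));  // 0xff rotated right by 8.
        assert(!IsEncodableImm8m(0x101));      // Two set bits 8 apart never fit.
        return 0;
    }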
public: enum Tag_ { OP2, @@ -1037,7 +1027,7 @@ typedef js::jit::AssemblerBufferWithConstantPool<1024, 4, Instruction, Assembler class Assembler : public AssemblerShared { public: - // ARM conditional constants + // ARM conditional constants: enum ARMCondition { EQ = 0x00000000, // Zero NE = 0x10000000, // Non-zero @@ -1094,7 +1084,8 @@ class Assembler : public AssemblerShared static const int DoubleConditionBitSpecial = 0x1; enum DoubleCondition { - // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. + // These conditions will only evaluate to true if the comparison is + // ordered - i.e. neither operand is NaN. DoubleOrdered = VFP_NotUnordered, DoubleEqual = VFP_Equal, DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial, @@ -1120,8 +1111,8 @@ class Assembler : public AssemblerShared return static_cast(cond); } - // :( this should be protected, but since CodeGenerator - // wants to use it, It needs to go out here :( + // This should be protected, but since CodeGenerator wants to use it, it + // needs to go out here :( BufferOffset nextOffset() { return m_buffer.nextOffset(); @@ -1132,7 +1123,7 @@ class Assembler : public AssemblerShared return BufferOffset(l->bound()); } - Instruction * editSrc (BufferOffset bo) { + Instruction *editSrc(BufferOffset bo) { return m_buffer.getInst(bo); } public: @@ -1145,8 +1136,8 @@ class Assembler : public AssemblerShared static uint32_t GetNopFill(); protected: - // structure for fixing up pc-relative loads/jumps when a the machine code - // gets moved (executable copy, gc, etc.) + // Structure for fixing up pc-relative loads/jumps when the machine code + // gets moved (executable copy, gc, etc.). struct RelativePatch { void *target; @@ -1156,8 +1147,8 @@ class Assembler : public AssemblerShared { } }; - // TODO: this should actually be a pool-like object - // It is currently a big hack, and probably shouldn't exist + // TODO: this should actually be a pool-like object. It is currently a big + // hack, and probably shouldn't exist. js::Vector codeLabels_; js::Vector jumps_; js::Vector tmpJumpRelocations_; @@ -1171,16 +1162,16 @@ class Assembler : public AssemblerShared ARMBuffer m_buffer; - // There is now a semi-unified interface for instruction generation. - // During assembly, there is an active buffer that instructions are - // being written into, but later, we may wish to modify instructions - // that have already been created. In order to do this, we call the - // same assembly function, but pass it a destination address, which - // will be overwritten with a new instruction. In order to do this very - // after assembly buffers no longer exist, when calling with a third - // dest parameter, a this object is still needed. dummy always happens - // to be null, but we shouldn't be looking at it in any case. + // There is now a semi-unified interface for instruction generation. During + // assembly, there is an active buffer that instructions are being written + // into, but later, we may wish to modify instructions that have already + // been created. In order to do this, we call the same assembly function, + // but pass it a destination address, which will be overwritten with a new + // instruction. In order to do this even after assembly buffers no longer + // exist, when calling with a third dest parameter, a this object is still + // needed. Dummy always happens to be null, but we shouldn't be looking at + // it in any case.
+ static Assembler *Dummy; mozilla::Array<Pool, 4> pools_; Pool *int32Pool; Pool *doublePool; @@ -1202,13 +1193,13 @@ class Assembler : public AssemblerShared void initWithAllocator() { m_buffer.initWithAllocator(); - // Set up the backwards double region + // Set up the backwards double region. new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true); - // Set up the backwards 32 bit region + // Set up the backwards 32 bit region. new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true); - // Set up the forwards double region + // Set up the forwards double region. new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]); - // Set up the forwards 32 bit region + // Set up the forwards 32 bit region. new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]); for (int i = 0; i < 4; i++) { if (pools_[i].poolData == nullptr) { @@ -1249,15 +1240,15 @@ class Assembler : public AssemblerShared }; public: - // Given the start of a Control Flow sequence, grab the value that is finally branched to - // given the start of a function that loads an address into a register get the address that - // ends up in the register. + // Given the start of a Control Flow sequence, grab the value that is + // finally branched to; given the start of a function that loads an address + // into a register, get the address that ends up in the register. template - static const uint32_t * getCF32Target(Iter *iter); + static const uint32_t *GetCF32Target(Iter *iter); - static uintptr_t getPointer(uint8_t *); + static uintptr_t GetPointer(uint8_t *); template - static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr); + static const uint32_t *GetPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr); bool oom() const; @@ -1291,8 +1282,8 @@ class Assembler : public AssemblerShared // Size of the data table, in bytes. size_t bytesNeeded() const; - // Write a blob of binary into the instruction stream *OR* - // into a destination address. If dest is nullptr (the default), then the + // Write a blob of binary into the instruction stream *OR* into a + // destination address. If dest is nullptr (the default), then the // instruction gets written into the instruction stream. If dest is not null // it is interpreted as a pointer to the location that we want the // instruction to be written. @@ -1303,7 +1294,7 @@ class Assembler : public AssemblerShared // A static variant for the cases where we don't want to have an assembler // object at all. Normally, you would use the dummy (nullptr) object.
- static void writeInstStatic(uint32_t x, uint32_t *dest); + static void WriteInstStatic(uint32_t x, uint32_t *dest); public: void writeCodePointer(AbsoluteLabel *label); @@ -1317,7 +1308,7 @@ class Assembler : public AssemblerShared Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr); BufferOffset as_mvn(Register dest, Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); - // logical operations + // Logical operations: BufferOffset as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); BufferOffset as_bic(Register dest, Register src1, @@ -1326,7 +1317,7 @@ class Assembler : public AssemblerShared Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); BufferOffset as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); - // mathematical operations + // Mathematical operations: BufferOffset as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); BufferOffset as_add(Register dest, Register src1, @@ -1339,7 +1330,7 @@ class Assembler : public AssemblerShared Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); BufferOffset as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); - // test operations + // Test operations: BufferOffset as_cmn(Register src1, Operand2 op2, Condition c = Always); BufferOffset as_cmp(Register src1, Operand2 op2, @@ -1349,9 +1340,9 @@ class Assembler : public AssemblerShared BufferOffset as_tst(Register src1, Operand2 op2, Condition c = Always); - // Not quite ALU worthy, but useful none the less: - // These also have the isue of these being formatted - // completly differently from the standard ALU operations. + // Not quite ALU worthy, but useful nonetheless: these also have the issue + // of being formatted completely differently from the standard ALU + // operations. BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr); BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr); @@ -1378,54 +1369,50 @@ class Assembler : public AssemblerShared BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always); // Data transfer instructions: ldr, str, ldrb, strb. - // Using an int to differentiate between 8 bits and 32 bits is - // overkill, but meh + // Using an int to differentiate between 8 bits and 32 bits is overkill. BufferOffset as_dtr(LoadStore ls, int size, Index mode, Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr); // Handles all of the other integral data transferring functions: - // ldrsb, ldrsh, ldrd, etc. - // size is given in bits. + // ldrsb, ldrsh, ldrd, etc. The size is given in bits. BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode, Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr); BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask, DTMMode mode, DTMWriteBack wb, Condition c = Always); - //overwrite a pool entry with new data. + // Overwrite a pool entry with new data. void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data); - // load a 32 bit immediate from a pool into a register + // Load a 32 bit immediate from a pool into a register. BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always); - // make a patchable jump that can target the entire 32 bit address space.
+    // Make a patchable jump that can target the entire 32 bit address space.
     BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);

-    // load a 64 bit floating point immediate from a pool into a register
+    // Load a 64 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always);
-    // load a 32 bit floating point immediate from a pool into a register
+    // Load a 32 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm32Pool(VFPRegister dest, float value, Condition c = Always);

     // Control flow stuff:
-    // bx can *only* branch to a register
-    // never to an immediate.
+    // bx can *only* branch to a register, never to an immediate.
     BufferOffset as_bx(Register r, Condition c = Always, bool isPatchable = false);
-    // Branch can branch to an immediate *or* to a register.
-    // Branches to immediates are pc relative, branches to registers
-    // are absolute
+    // Branch can branch to an immediate *or* to a register. Branches to
+    // immediates are pc relative, branches to registers are absolute.
     BufferOffset as_b(BOffImm off, Condition c, bool isPatchable = false);

     BufferOffset as_b(Label *l, Condition c = Always, bool isPatchable = false);
     BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);

-    // blx can go to either an immediate or a register.
-    // When blx'ing to a register, we change processor mode
-    // depending on the low bit of the register
-    // when blx'ing to an immediate, we *always* change processor state.
+    // blx can go to either an immediate or a register. When blx'ing to a
+    // register, we change processor mode depending on the low bit of the
+    // register; when blx'ing to an immediate, we *always* change processor
+    // state.
     BufferOffset as_blx(Label *l);

     BufferOffset as_blx(Register r, Condition c = Always);
     BufferOffset as_bl(BOffImm off, Condition c);
-    // bl can only branch+link to an immediate, never to a register
-    // it never changes processor state
+    // bl can only branch+link to an immediate, never to a register; it never
+    // changes processor state.
     BufferOffset as_bl();
     // bl #imm can have a condition code, blx #imm cannot.
     // blx reg can be conditional.
@@ -1438,8 +1425,8 @@ class Assembler : public AssemblerShared

   private:
     enum vfp_size {
-        isDouble = 1 << 8,
-        isSingle = 0 << 8
+        IsDouble = 1 << 8,
+        IsSingle = 0 << 8
     };

     BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr);
@@ -1480,9 +1467,9 @@ class Assembler : public AssemblerShared
                            Condition c = Always);
     BufferOffset as_vcmpz(VFPRegister vd,  Condition c = Always);

-    // specifically, a move between two same sized-registers
+    // Specifically, a move between two same-sized registers.
     BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
-    /*xfer between Core and VFP*/
+    // Transfer between Core and VFP.
     enum FloatToCore_ {
         FloatToCore = 1 << 20,
         CoreToFloat = 0 << 20
     };
@@ -1496,28 +1483,28 @@ class Assembler : public AssemblerShared

   public:
     // Unlike the next function, moving between the core registers and vfp
-    // registers can't be *that* properly typed. Namely, since I don't want to
-    // munge the type VFPRegister to also include core registers. Thus, the core
+    // registers can't be *that* properly typed, since I don't want to munge
+    // the type VFPRegister to also include core registers. Thus, the core
     // and vfp registers are passed in based on their type, and src/dest is
     // determined by the float2core.
     BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
                           Condition c = Always, int idx = 0);

-    // our encoding actually allows just the src and the dest (and theiyr types)
+    // Our encoding actually allows just the src and the dest (and their types)
     // to uniquely specify the encoding that we are going to use.
     BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                          Condition c = Always);
-    // hard coded to a 32 bit fixed width result for now
+    // Hard-coded to a 32 bit fixed width result for now.
     BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always);

-    /* xfer between VFP and memory*/
+    // Transfer between VFP and memory.
     BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                          Condition c = Always /* vfp doesn't have a wb option*/,
                          uint32_t *dest = nullptr);

-    // VFP's ldm/stm work differently from the standard arm ones.
-    // You can only transfer a range
+    // VFP's ldm/stm work differently from the standard arm ones. You can only
+    // transfer a range.
     BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
                          /*also has update conditions*/Condition c = Always);

@@ -1526,7 +1513,7 @@ class Assembler : public AssemblerShared
     BufferOffset as_vmrs(Register r, Condition c = Always);
     BufferOffset as_vmsr(Register r, Condition c = Always);

-    // label operations
+    // Label operations.
     bool nextLink(BufferOffset b, BufferOffset *next);
     void bind(Label *label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel *label);
@@ -1638,7 +1625,7 @@ class Assembler : public AssemblerShared
         dtmActive = false;
         JS_ASSERT(dtmLastReg != -1);
         dtmDelta = dtmDelta ? dtmDelta : 1;
-        // fencepost problem.
+        // Fencepost problem.
         int len = dtmDelta * (dtmLastReg - vdtmFirstReg) + 1;
         as_vdtm(dtmLoadStore, dtmBase,
                 VFPRegister(FloatRegister::FromCode(Min(vdtmFirstReg, dtmLastReg))),
@@ -1659,61 +1646,58 @@ class Assembler : public AssemblerShared

   public:
     enum {
-        padForAlign8  = (int)0x00,
-        padForAlign16 = (int)0x0000,
-        padForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
+        PadForAlign8  = (int)0x00,
+        PadForAlign16 = (int)0x0000,
+        PadForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
     };

-    // API for speaking with the IonAssemblerBufferWithConstantPools
-    // generate an initial placeholder instruction that we want to later fix up
-    static void insertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
-    // take the stub value that was written in before, and write in an actual load
-    // using the index we'd computed previously as well as the address of the pool start.
-    static bool patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
-    // this is a callback for when we have filled a pool, and MUST flush it now.
-    // The pool requires the assembler to place a branch past the pool, and it
-    // calls this function.
-    static uint32_t placeConstantPoolBarrier(int offset);
+    // API for speaking with the IonAssemblerBufferWithConstantPools: generate
+    // an initial placeholder instruction that we want to later fix up.
+    static void InsertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
+    // Take the stub value that was written in before, and write in an actual
+    // load using the index we'd computed previously as well as the address of
+    // the pool start.
+    static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);

     // END API

-    // move our entire pool into the instruction stream
-    // This is to force an opportunistic dump of the pool, prefferably when it
-    // is more convenient to do a dump.
+    // Move our entire pool into the instruction stream. This is to force an
+    // opportunistic dump of the pool, preferably when it is more convenient
+    // to do a dump.
     void dumpPool();

     void flushBuffer();
     void enterNoPool();
     void leaveNoPool();

-    // this should return a BOffImm, but I didn't want to require everyplace that used the
-    // AssemblerBuffer to make that class.
+    // This should return a BOffImm, but we didn't want to require every place
+    // that used the AssemblerBuffer to make that class.
     static ptrdiff_t getBranchOffset(const Instruction *i);
-    static void retargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
-    static void retargetNearBranch(Instruction *i, int offset, bool final = true);
-    static void retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);
+    static void RetargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
+    static void RetargetNearBranch(Instruction *i, int offset, bool final = true);
+    static void RetargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);

-    static void writePoolHeader(uint8_t *start, Pool *p, bool isNatural);
-    static void writePoolFooter(uint8_t *start, Pool *p, bool isNatural);
-    static void writePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
+    static void WritePoolHeader(uint8_t *start, Pool *p, bool isNatural);
+    static void WritePoolFooter(uint8_t *start, Pool *p, bool isNatural);
+    static void WritePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);

-    static uint32_t patchWrite_NearCallSize();
-    static uint32_t nopSize() { return 4; }
-    static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
-    static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+    static uint32_t PatchWrite_NearCallSize();
+    static uint32_t NopSize() { return 4; }
+    static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+    static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
-    static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+    static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                         ImmPtr expectedValue);
-    static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+    static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);

-    static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
+    static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
         MOZ_ASSUME_UNREACHABLE("Unused.");
     }

-    static uint32_t alignDoubleArg(uint32_t offset) {
-        return (offset+1)&~1;
+    static uint32_t AlignDoubleArg(uint32_t offset) {
+        return (offset + 1) & ~1;
     }
-    static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
-    // Toggle a jmp or cmp emitted by toggledJump().
+    static uint8_t *NextInstruction(uint8_t *instruction, uint32_t *count = nullptr);

+    // Toggle a jmp or cmp emitted by toggledJump().
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
@@ -1722,9 +1706,9 @@ class Assembler : public AssemblerShared
     static size_t ToggledCallSize(uint8_t *code);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);

-    static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
+    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
     void processCodeLabels(uint8_t *rawCode);
-    static int32_t extractCodeLabelOffset(uint8_t *code) {
+    static int32_t ExtractCodeLabelOffset(uint8_t *code) {
         return *(uintptr_t *)code;
     }

@@ -1733,43 +1717,44 @@
     }
}; // Assembler

-// An Instruction is a structure for both encoding and decoding any and all ARM instructions.
-// many classes have not been implemented thusfar.
+// An Instruction is a structure for both encoding and decoding any and all ARM
+// instructions. Many classes have not been implemented thus far.
class Instruction
{
    uint32_t data;

  protected:
    // This is not for defaulting to always, this is for instructions that
-    // cannot be made conditional, and have the usually invalid 4b1111 cond field
+    // cannot be made conditional, and have the usually invalid 4b1111 cond
+    // field.
    Instruction (uint32_t data_, bool fake = false) : data(data_ | 0xf0000000)
    {
        JS_ASSERT (fake || ((data_ & 0xf0000000) == 0));
    }
-    // Standard constructor
+    // Standard constructor.
    Instruction (uint32_t data_, Assembler::Condition c) : data(data_ | (uint32_t) c)
    {
        JS_ASSERT ((data_ & 0xf0000000) == 0);
    }
-    // You should never create an instruction directly. You should create a
-    // more specific instruction which will eventually call one of these
-    // constructors for you.
+    // You should never create an instruction directly. You should create a more
+    // specific instruction which will eventually call one of these constructors
+    // for you.
  public:
    uint32_t encode() const {
        return data;
    }
-    // Check if this instruction is really a particular case
+    // Check if this instruction is really a particular case.
    template <class C>
-    bool is() const { return C::isTHIS(*this); }
+    bool is() const { return C::IsTHIS(*this); }

-    // safely get a more specific variant of this pointer
+    // Safely get a more specific variant of this pointer.
    template <class C>
-    C *as() const { return C::asTHIS(*this); }
+    C *as() const { return C::AsTHIS(*this); }

    const Instruction & operator=(const Instruction &src) {
        data = src.data;
        return *this;
    }
-    // Since almost all instructions have condition codes, the condition
-    // code extractor resides in the base class.
+    // Since almost all instructions have condition codes, the condition code
+    // extractor resides in the base class.
    void extractCond(Assembler::Condition *c) {
        if (data >> 28 != 0xf )
            *c = (Assembler::Condition)(data & 0xf0000000);
@@ -1781,16 +1766,16 @@ class Instruction
    // Skipping pools with artificial guards.
    Instruction *skipPool();

-    // Sometimes, an api wants a uint32_t (or a pointer to it) rather than
-    // an instruction. raw() just coerces this into a pointer to a uint32_t
+    // Sometimes, an api wants a uint32_t (or a pointer to it) rather than an
+    // instruction. raw() just coerces this into a pointer to a uint32_t.
    const uint32_t *raw() const { return &data; }
    uint32_t size() const { return 4; }
}; // Instruction

-// make sure that it is the right size
+// Make sure that it is the right size.
JS_STATIC_ASSERT(sizeof(Instruction) == 4);

-// Data Transfer Instructions
+// Data Transfer Instructions.
class InstDTR : public Instruction { public: @@ -1806,8 +1791,8 @@ class InstDTR : public Instruction : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c) { } - static bool isTHIS(const Instruction &i); - static InstDTR *asTHIS(const Instruction &i); + static bool IsTHIS(const Instruction &i); + static InstDTR *AsTHIS(const Instruction &i); }; JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction)); @@ -1818,8 +1803,8 @@ class InstLDR : public InstDTR InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c) : InstDTR(IsLoad, IsWord, mode, rt, addr, c) { } - static bool isTHIS(const Instruction &i); - static InstLDR *asTHIS(const Instruction &i); + static bool IsTHIS(const Instruction &i); + static InstLDR *AsTHIS(const Instruction &i); }; JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR)); @@ -1833,8 +1818,8 @@ class InstNOP : public Instruction : Instruction(NopInst, Assembler::Always) { } - static bool isTHIS(const Instruction &i); - static InstNOP *asTHIS(Instruction &i); + static bool IsTHIS(const Instruction &i); + static InstNOP *AsTHIS(Instruction &i); }; // Branching to a register, or calling a register @@ -1851,8 +1836,8 @@ class InstBranchReg : public Instruction : Instruction(tag | rm.code(), c) { } public: - static bool isTHIS (const Instruction &i); - static InstBranchReg *asTHIS (const Instruction &i); + static bool IsTHIS (const Instruction &i); + static InstBranchReg *AsTHIS (const Instruction &i); // Get the register that is being branched to void extractDest(Register *dest); // Make sure we are branching to a pre-known register @@ -1875,8 +1860,8 @@ class InstBranchImm : public Instruction { } public: - static bool isTHIS (const Instruction &i); - static InstBranchImm *asTHIS (const Instruction &i); + static bool IsTHIS (const Instruction &i); + static InstBranchImm *AsTHIS (const Instruction &i); void extractImm(BOffImm *dest); }; JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction)); @@ -1885,8 +1870,8 @@ JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction)); class InstBXReg : public InstBranchReg { public: - static bool isTHIS (const Instruction &i); - static InstBXReg *asTHIS (const Instruction &i); + static bool IsTHIS (const Instruction &i); + static InstBXReg *AsTHIS (const Instruction &i); }; class InstBLXReg : public InstBranchReg { @@ -1895,8 +1880,8 @@ class InstBLXReg : public InstBranchReg : InstBranchReg(IsBLX, reg, c) { } - static bool isTHIS (const Instruction &i); - static InstBLXReg *asTHIS (const Instruction &i); + static bool IsTHIS (const Instruction &i); + static InstBLXReg *AsTHIS (const Instruction &i); }; class InstBImm : public InstBranchImm { @@ -1905,8 +1890,8 @@ class InstBImm : public InstBranchImm : InstBranchImm(IsB, off, c) { } - static bool isTHIS (const Instruction &i); - static InstBImm *asTHIS (const Instruction &i); + static bool IsTHIS (const Instruction &i); + static InstBImm *AsTHIS (const Instruction &i); }; class InstBLImm : public InstBranchImm { @@ -1915,8 +1900,8 @@ class InstBLImm : public InstBranchImm : InstBranchImm(IsBL, off, c) { } - static bool isTHIS (const Instruction &i); - static InstBLImm *asTHIS (Instruction &i); + static bool IsTHIS (const Instruction &i); + static InstBLImm *AsTHIS (Instruction &i); }; // Both movw and movt. 
The layout of both the immediate and the destination
@@ -1940,8 +1925,8 @@ class InstMovWT : public Instruction
    bool checkImm(Imm16 dest);
    bool checkDest(Register dest);

-    static bool isTHIS (Instruction &i);
-    static InstMovWT *asTHIS (Instruction &i);
+    static bool IsTHIS (Instruction &i);
+    static InstMovWT *AsTHIS (Instruction &i);

};
JS_STATIC_ASSERT(sizeof(InstMovWT) == sizeof(Instruction));
@@ -1953,8 +1938,8 @@ class InstMovW : public InstMovWT
      : InstMovWT(rd, imm, IsW, c)
    { }

-    static bool isTHIS (const Instruction &i);
-    static InstMovW *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMovW *AsTHIS (const Instruction &i);
};

class InstMovT : public InstMovWT
{
  public:
    InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsT, c)
    { }
-    static bool isTHIS (const Instruction &i);
-    static InstMovT *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMovT *AsTHIS (const Instruction &i);
};

class InstALU : public Instruction
{
  public:
    InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
      : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
    { }
-    static bool isTHIS (const Instruction &i);
-    static InstALU *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstALU *AsTHIS (const Instruction &i);
    void extractOp(ALUOp *ret);
    bool checkOp(ALUOp op);
    void extractDest(Register *ret);
@@ -1988,15 +1973,15 @@ class InstALU : public Instruction
class InstCMP : public InstALU
{
  public:
-    static bool isTHIS (const Instruction &i);
-    static InstCMP *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstCMP *AsTHIS (const Instruction &i);
};

class InstMOV : public InstALU
{
  public:
-    static bool isTHIS (const Instruction &i);
-    static InstMOV *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMOV *AsTHIS (const Instruction &i);
};


@@ -2027,7 +2012,7 @@ GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
}

// Get a register in which we plan to put a quantity that will be used as an
-// integer argument. This differs from GetIntArgReg in that if we have no more
+// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
@@ -2103,7 +2088,7 @@ GetDoubleArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *pa
    uint32_t intSlots = 0;
    if (usedIntArgs > NumIntArgRegs) {
        intSlots = usedIntArgs - NumIntArgRegs;
-        // update the amount of padding required.
+        // Update the amount of padding required.
        *padding += (*padding + usedIntArgs) % 2;
    }
    uint32_t doubleSlots = usedFloatArgs - NumFloatArgRegs;
@@ -2124,18 +2109,17 @@ class DoubleEncoder {
    }

    uint32_t encode(uint8_t value) {
-        //ARM ARM "VFP modified immediate constants"
-        // aBbbbbbb bbcdefgh 000...
-        // we want to return the top 32 bits of the double
-        // the rest are 0.
+        // ARM ARM "VFP modified immediate constants"
+        //  aBbbbbbb bbcdefgh 000...
+        // We want to return the top 32 bits of the double; the rest are 0.
        bool a = value >> 7;
        bool b = value >> 6 & 1;
        bool B = !b;
        uint32_t cdefgh = value & 0x3f;
-        return a << 31 |
-               B << 30 |
-               rep(b, 8) << 22 |
-               cdefgh << 16;
+        return a << 31 |
+               B << 30 |
+               rep(b, 8) << 22 |
+               cdefgh << 16;
    }

    struct DoubleEntry
diff --git a/js/src/jit/arm/Bailouts-arm.cpp b/js/src/jit/arm/Bailouts-arm.cpp
index eaed651e8fc0..0db5a3d7ef8d 100644
--- a/js/src/jit/arm/Bailouts-arm.cpp
+++ b/js/src/jit/arm/Bailouts-arm.cpp
@@ -20,9 +20,9 @@ namespace jit {
class BailoutStack
{
    uintptr_t frameClassId_;
-    // This is pushed in the bailout handler. Both entry points into the handler
+    // This is pushed in the bailout handler. Both entry points into the handler
    // insert their own value into lr, which is then placed onto the stack along
-    // with frameClassId_ above. This should be migrated to ip.
+    // with frameClassId_ above. This should be migrated to ip.
  public:
    union {
        uintptr_t frameSize_;
diff --git a/js/src/jit/arm/BaselineHelpers-arm.h b/js/src/jit/arm/BaselineHelpers-arm.h
index 4a489476299a..fb320453b8ef 100644
--- a/js/src/jit/arm/BaselineHelpers-arm.h
+++ b/js/src/jit/arm/BaselineHelpers-arm.h
@@ -46,7 +46,7 @@ EmitCallIC(CodeOffsetLabel *patchOffset, MacroAssembler &masm)
    JS_ASSERT(R2 == ValueOperand(r1, r0));
    masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);

-    // Call the stubcode via a direct branch-and-link
+    // Call the stubcode via a direct branch-and-link.
    masm.ma_blx(r0);
}

@@ -54,8 +54,8 @@ inline void
EmitEnterTypeMonitorIC(MacroAssembler &masm,
                       size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
{
-    // This is expected to be called from within an IC, when BaselineStubReg
-    // is properly initialized to point to the stub.
+    // This is expected to be called from within an IC, when BaselineStubReg is
+    // properly initialized to point to the stub.
    masm.loadPtr(Address(BaselineStubReg, (uint32_t) monitorStubOffset), BaselineStubReg);

    // Load stubcode pointer from BaselineStubEntry.
@@ -96,9 +96,9 @@ EmitTailCallVM(JitCode *target, MacroAssembler &masm, uint32_t argSize)
    masm.store32(r1, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));

    // Push frame descriptor and perform the tail call.
-    // BaselineTailCallReg (lr) already contains the return address (as we keep it there through
-    // the stub calls), but the VMWrapper code being called expects the return address to also
-    // be pushed on the stack.
+    // BaselineTailCallReg (lr) already contains the return address (as we keep
+    // it there through the stub calls), but the VMWrapper code being called
+    // expects the return address to also be pushed on the stack.
    JS_ASSERT(BaselineTailCallReg == lr);
    masm.makeFrameDescriptor(r0, JitFrame_BaselineJS);
    masm.push(r0);
@@ -109,8 +109,8 @@ inline void
EmitCreateStubFrameDescriptor(MacroAssembler &masm, Register reg)
{
-    // Compute stub frame size. We have to add two pointers: the stub reg and previous
-    // frame pointer pushed by EmitEnterStubFrame.
+    // Compute stub frame size. We have to add two pointers: the stub reg and
+    // previous frame pointer pushed by EmitEnterStubFrame.
masm.mov(BaselineFrameReg, reg); masm.ma_add(Imm32(sizeof(void *) * 2), reg); masm.ma_sub(BaselineStackReg, reg); @@ -142,8 +142,8 @@ EmitEnterStubFrame(MacroAssembler &masm, Register scratch) masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); - // Note: when making changes here, don't forget to update STUB_FRAME_SIZE - // if needed. + // Note: when making changes here, don't forget to update STUB_FRAME_SIZE if + // needed. // Push frame descriptor and return address. masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS); @@ -162,10 +162,10 @@ EmitEnterStubFrame(MacroAssembler &masm, Register scratch) inline void EmitLeaveStubFrame(MacroAssembler &masm, bool calledIntoIon = false) { - // Ion frames do not save and restore the frame pointer. If we called - // into Ion, we have to restore the stack pointer from the frame descriptor. - // If we performed a VM call, the descriptor has been popped already so - // in that case we use the frame pointer. + // Ion frames do not save and restore the frame pointer. If we called into + // Ion, we have to restore the stack pointer from the frame descriptor. If + // we performed a VM call, the descriptor has been popped already so in that + // case we use the frame pointer. if (calledIntoIon) { masm.pop(ScratchRegister); masm.ma_lsr(Imm32(FRAMESIZE_SHIFT), ScratchRegister, ScratchRegister); @@ -190,11 +190,11 @@ EmitStowICValues(MacroAssembler &masm, int values) JS_ASSERT(values >= 0 && values <= 2); switch(values) { case 1: - // Stow R0 + // Stow R0. masm.pushValue(R0); break; case 2: - // Stow R0 and R1 + // Stow R0 and R1. masm.pushValue(R0); masm.pushValue(R1); break; @@ -207,14 +207,14 @@ EmitUnstowICValues(MacroAssembler &masm, int values, bool discard = false) JS_ASSERT(values >= 0 && values <= 2); switch(values) { case 1: - // Unstow R0 + // Unstow R0. if (discard) masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg); else masm.popValue(R0); break; case 2: - // Unstow R0 and R1 + // Unstow R0 and R1. if (discard) { masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg); } else { @@ -230,17 +230,17 @@ EmitCallTypeUpdateIC(MacroAssembler &masm, JitCode *code, uint32_t objectOffset) { JS_ASSERT(R2 == ValueOperand(r1, r0)); - // R0 contains the value that needs to be typechecked. - // The object we're updating is a boxed Value on the stack, at offset - // objectOffset from esp, excluding the return address. + // R0 contains the value that needs to be typechecked. The object we're + // updating is a boxed Value on the stack, at offset objectOffset from esp, + // excluding the return address. // Save the current BaselineStubReg to stack, as well as the TailCallReg, // since on ARM, the LR is live. masm.push(BaselineStubReg); masm.push(BaselineTailCallReg); - // This is expected to be called from within an IC, when BaselineStubReg - // is properly initialized to point to the stub. + // This is expected to be called from within an IC, when BaselineStubReg is + // properly initialized to point to the stub. masm.loadPtr(Address(BaselineStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()), BaselineStubReg); @@ -286,7 +286,7 @@ template inline void EmitPreBarrier(MacroAssembler &masm, const AddrType &addr, MIRType type) { - // on ARM, lr is clobbered by patchableCallPreBarrier. Save it first. + // On ARM, lr is clobbered by patchableCallPreBarrier. Save it first. 
masm.push(lr); masm.patchableCallPreBarrier(addr, type); masm.pop(lr); @@ -302,7 +302,7 @@ EmitStubGuardFailure(MacroAssembler &masm) // BaselineStubEntry points to the current stub. - // Load next stub into BaselineStubReg + // Load next stub into BaselineStubReg. masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfNext()), BaselineStubReg); // Load stubcode pointer from BaselineStubEntry into scratch register. diff --git a/js/src/jit/arm/BaselineIC-arm.cpp b/js/src/jit/arm/BaselineIC-arm.cpp index 03a96734d40c..0f37e5685ad1 100644 --- a/js/src/jit/arm/BaselineIC-arm.cpp +++ b/js/src/jit/arm/BaselineIC-arm.cpp @@ -36,7 +36,7 @@ ICCompare_Int32::Compiler::generateStubCode(MacroAssembler &masm) masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0); EmitReturnFromIC(masm); - // Failure case - jump to next stub + // Failure case - jump to next stub. masm.bind(&failure); EmitStubGuardFailure(masm); @@ -62,7 +62,7 @@ ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm) masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0); EmitReturnFromIC(masm); - // Failure case - jump to next stub + // Failure case - jump to next stub. masm.bind(&failure); EmitStubGuardFailure(masm); return true; @@ -82,7 +82,7 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm) masm.branchTestInt32(Assembler::NotEqual, R0, &failure); masm.branchTestInt32(Assembler::NotEqual, R1, &failure); - // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg. + // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg. Register scratchReg = R2.payloadReg(); // DIV and MOD need an extra non-volatile ValueOperand to hold R0. @@ -95,12 +95,12 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm) case JSOP_ADD: masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond); - // Just jump to failure on overflow. R0 and R1 are preserved, so we can just jump to - // the next stub. + // Just jump to failure on overflow. R0 and R1 are preserved, so we can + // just jump to the next stub. masm.j(Assembler::Overflow, &failure); - // Box the result and return. We know R0.typeReg() already contains the integer - // tag, so we just need to move the result value into place. + // Box the result and return. We know R0.typeReg() already contains the + // integer tag, so we just need to move the result value into place. masm.mov(scratchReg, R0.payloadReg()); break; case JSOP_SUB: @@ -131,7 +131,8 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm) masm.ma_cmp(R0.payloadReg(), Imm32(0), Assembler::LessThan); masm.j(Assembler::Equal, &failure); - // The call will preserve registers r4-r11. Save R0 and the link register. + // The call will preserve registers r4-r11. Save R0 and the link + // register. JS_ASSERT(R1 == ValueOperand(r5, r4)); JS_ASSERT(R0 == ValueOperand(r3, r2)); masm.moveValue(R0, savedValue); @@ -222,7 +223,7 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm) break; } - // Failure case - jump to next stub + // Failure case - jump to next stub. masm.bind(&failure); EmitStubGuardFailure(masm); diff --git a/js/src/jit/arm/BaselineRegisters-arm.h b/js/src/jit/arm/BaselineRegisters-arm.h index d83fdd1f8efc..8246adee82e9 100644 --- a/js/src/jit/arm/BaselineRegisters-arm.h +++ b/js/src/jit/arm/BaselineRegisters-arm.h @@ -23,16 +23,14 @@ static MOZ_CONSTEXPR_VAR Register BaselineFrameReg = r11; static MOZ_CONSTEXPR_VAR Register BaselineStackReg = sp; // ValueOperands R0, R1, and R2. 
-// R0 == JSReturnReg, and R2 uses registers not
-// preserved across calls. R1 value should be
-// preserved across calls.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1's
+// value should be preserved across calls.
static MOZ_CONSTEXPR_VAR ValueOperand R0(r3, r2);
static MOZ_CONSTEXPR_VAR ValueOperand R1(r5, r4);
static MOZ_CONSTEXPR_VAR ValueOperand R2(r1, r0);

// BaselineTailCallReg and BaselineStubReg
-// These use registers that are not preserved across
-// calls.
+// These use registers that are not preserved across calls.
static MOZ_CONSTEXPR_VAR Register BaselineTailCallReg = r14;
static MOZ_CONSTEXPR_VAR Register BaselineStubReg     = r9;

@@ -44,9 +42,9 @@ static MOZ_CONSTEXPR_VAR Register BaselineSecondScratchReg = r6;

// R7 - R9 are generally available for use within stubcode.

-// Note that BaselineTailCallReg is actually just the link
-// register. In ARM code emission, we do not clobber BaselineTailCallReg
-// since we keep the return address for calls there.
+// Note that BaselineTailCallReg is actually just the link register. In ARM code
+// emission, we do not clobber BaselineTailCallReg since we keep the return
+// address for calls there.

// FloatReg0 must be equal to ReturnFloatReg.
static MOZ_CONSTEXPR_VAR FloatRegister FloatReg0 = d0;
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
index 4f98765d6eb2..1aa1a0480641 100644
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -92,7 +92,7 @@ CodeGeneratorARM::generateEpilogue()
    masm.freeStack(frameSize());
    JS_ASSERT(masm.framePushed() == 0);
    masm.pop(pc);
-    masm.dumpPool();
+    masm.flushBuffer();
    return true;
}

@@ -275,18 +275,22 @@ CodeGeneratorARM::visitMinMaxD(LMinMaxD *ins)
    Label nan, equal, returnSecond, done;

    masm.compareDouble(first, second);
-    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
-    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
+    // First or second is NaN, result is NaN.
+    masm.ma_b(&nan, Assembler::VFP_Unordered);
+    // Make sure we handle -0 and 0 right.
+    masm.ma_b(&equal, Assembler::VFP_Equal);
    masm.ma_b(&returnSecond, cond);
    masm.ma_b(&done);

    // Check for zero.
    masm.bind(&equal);
    masm.compareDouble(first, InvalidFloatReg);
-    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
+    // First wasn't 0 or -0, so just return it.
+    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
    // So now both operands are either -0 or 0.
    if (ins->mir()->isMax()) {
-        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
+        // -0 + -0 = -0 and -0 + 0 = 0.
+        masm.ma_vadd(second, first, first);
    } else {
        masm.ma_vneg(first, first);
        masm.ma_vsub(first, second, first);
@@ -403,11 +407,11 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
        break;
      case 0:
        masm.ma_mov(Imm32(0), ToRegister(dest));
-        return true; // escape overflow check;
+        return true; // Escape overflow check.
      case 1:
-        // nop
+        // Nop.
        masm.ma_mov(ToRegister(lhs), ToRegister(dest));
-        return true; // escape overflow check;
+        return true; // Escape overflow check.
      case 2:
        masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
        // Overflow is handled later.
@@ -417,17 +421,19 @@
      if (constant > 0) {
          // Try shift and add sequences for a positive constant.
          if (!mul->canOverflow()) {
-              // If it cannot overflow, we can do lots of optimizations
+              // If it cannot overflow, we can do lots of optimizations.
              Register src = ToRegister(lhs);
              uint32_t shift = FloorLog2(constant);
              uint32_t rest = constant - (1 << shift);
-              // See if the constant has one bit set, meaning it can be encoded as a bitshift
+              // See if the constant has one bit set, meaning it can be
+              // encoded as a bitshift.
              if ((1 << shift) == constant) {
                  masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                  handled = true;
              } else {
-                  // If the constant cannot be encoded as (1 << shift), check at runtime
-                  // that (lhs == dest >> shift), if this does not hold, some bits were
-                  // lost due to overflow, and the computation should be resumed as a double.
+                  // If the constant cannot be encoded as (1 << shift), check
+                  // at runtime that (lhs == dest >> shift); if this does not
+                  // hold, some bits were lost due to overflow, and the
+                  // computation should be resumed as a double.
                  masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                  c = Assembler::NotEqual;
                  handled = true;
@@ -462,19 +468,19 @@
              }
          }
      }
-      // Bailout on overflow
+      // Bailout on overflow.
      if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
          return false;
    } else {
        Assembler::Condition c = Assembler::Overflow;

-        //masm.imull(ToOperand(rhs), ToRegister(lhs));
+        // masm.imull(ToOperand(rhs), ToRegister(lhs));
        if (mul->canOverflow())
            c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
        else
            masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));

-        // Bailout on overflow
+        // Bailout on overflow.
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;
@@ -502,8 +508,11 @@ CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register out
    if (mir->canBeNegativeOverflow()) {
        // Handle INT32_MIN / -1;
        // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
-        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
-        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
+
+        // Sets EQ if lhs == INT32_MIN.
+        masm.ma_cmp(lhs, Imm32(INT32_MIN));
+        // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
+        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 = INT32_MIN
            Label skip;
@@ -553,7 +562,7 @@
bool
CodeGeneratorARM::visitDivI(LDivI *ins)
{
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register temp = ToRegister(ins->getTemp(0));
@@ -588,7 +597,7 @@ extern "C" {
bool
CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
{
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
@@ -662,19 +671,20 @@
bool
CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
-    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
-    // the result should be -0.0, which cannot be represented in integers.
+    // 0/X (with X < 0) is bad because both of these values *should* be doubles,
+    // and the result should be -0.0, which cannot be represented in integers.
    // X/0 is bad because it will give garbage (or abort), when it should give
    // either \infty, -\infty or NAN.
    // Prevent 0 / X (with X < 0) and X / 0
-    // testing X / Y. Compare Y with 0.
-    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
-    // If (Y < 0), then we compare X with 0, and bail if X == 0
-    // If (Y == 0), then we simply want to bail. Since this does not set
-    // the flags necessary for LT to trigger, we don't test X, and take the
-    // bailout because the EQ flag is set.
-    // if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
+    // testing X / Y. Compare Y with 0.
+    // There are three cases: (Y < 0), (Y == 0) and (Y > 0).
+    // If (Y < 0), then we compare X with 0, and bail if X == 0.
+    // If (Y == 0), then we simply want to bail. Since this does not set the
+    // flags necessary for LT to trigger, we don't test X, and take the bailout
+    // because the EQ flag is set.
+    // If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
+    // the bailout.
    if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
        masm.ma_cmp(rhs, Imm32(0));
        masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
@@ -704,7 +714,7 @@ CodeGeneratorARM::visitModI(LModI *ins)
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();

-    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    masm.ma_mov(lhs, callTemp);

    Label done;
@@ -713,7 +723,7 @@
    masm.ma_smod(lhs, rhs, output);

-    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
            Label skip;
@@ -735,7 +745,7 @@
bool
CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
{
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
@@ -743,15 +753,17 @@ CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
    MMod *mir = ins->mir();
    Label done;

-    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
    masm.ma_mov(lhs, callTemp);

    // Prevent INT_MIN % -1;
    // The integer division will give INT_MIN, but we want -(double)INT_MIN.
    if (mir->canBeNegativeDividend()) {
-        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
-        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT_MIN), sets EQ if rhs == -1
+        // Sets EQ if lhs == INT_MIN.
+        masm.ma_cmp(lhs, Imm32(INT_MIN));
+        // If EQ (LHS == INT_MIN), sets EQ if rhs == -1.
+        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
        if (mir->isTruncated()) {
            // (INT_MIN % -1)|0 == 0
            Label skip;
@@ -802,11 +814,12 @@ CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
    Register out = ToRegister(ins->getDef(0));
    MMod *mir = ins->mir();
    Label fin;
-    // bug 739870, jbramley has a different sequence that may help with speed here
+    // Bug 739870: jbramley has a different sequence that may help with speed
+    // here.
    masm.ma_mov(in, out, SetCond);
    masm.ma_b(&fin, Assembler::Zero);
    masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
-    masm.ma_and(Imm32((1<<ins->shift())-1), out);
+    masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
@@ -846,9 +859,8 @@ CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
{
    const LAllocation *input = ins->getOperand(0);
    const LDefinition *dest = ins->getDef(0);
-    // this will not actually be true on arm.
-    // We can not an imm8m in order to get a wider range
-    // of numbers
+    // This will not actually be true on arm. We cannot use an imm8m in order
+    // to get a wider range of numbers.
    JS_ASSERT(!input->isConstant());

    masm.ma_mvn(ToRegister(input), ToRegister(dest));
@@ -861,7 +873,7 @@ CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
-    // all of these bitops should be either imm32's, or integer registers.
+    // All of these bitops should be either imm32's, or integer registers.
    switch (ins->bitop()) {
      case JSOP_BITOR:
        if (rhs->isConstant())
@@ -928,8 +940,8 @@ CodeGeneratorARM::visitShiftI(LShiftI *ins)
        }
    } else {
        // The shift amounts should be AND'ed into the 0-31 range since arm
-        // shifts by the lower byte of the register (it will attempt to shift
-        // by 250 if you ask it to).
+        // shifts by the lower byte of the register (it will attempt to shift by
+        // 250 if you ask it to).
        masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);

        switch (ins->bitop()) {
@@ -994,7 +1006,8 @@ CodeGeneratorARM::visitPowHalfD(LPowHalfD *ins)
    masm.ma_vneg(ScratchDoubleReg, output, Assembler::Equal);
    masm.ma_b(&done, Assembler::Equal);

-    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
+    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+    // Adding 0 converts any -0 to 0.
    masm.ma_vimm(0.0, ScratchDoubleReg);
    masm.ma_vadd(ScratchDoubleReg, input, output);
    masm.ma_vsqrt(output, output);
@@ -1067,45 +1080,46 @@ CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
bool
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, Register index, Register base)
{
-    // the code generated by this is utter hax.
-    // the end result looks something like:
+    // The code generated by this is utter hax.
+    // The end result looks something like:
    // SUBS index, input, #base
    // RSBSPL index, index, #max
    // LDRPL pc, pc, index lsl 2
    // B default
    // If the range of targets is N through M, we first subtract off the lowest
-    // case (N), which both shifts the arguments into the range 0 to (M-N) with
-    // and sets the MInus flag if the argument was out of range on the low end.
+    // case (N), which both shifts the arguments into the range 0 to (M - N)
+    // and sets the Minus flag if the argument was out of range on the low
+    // end.
    // Then we do a reverse subtract with the size of the jump table, which will
    // reverse the order of the range (it is size through 0, rather than 0 through
-    // size). The main purpose of this is that we set the same flag as the lower
-    // bound check for the upper bound check. Lastly, we do this conditionally
+    // size). The main purpose of this is that we set the same flag as the lower
+    // bound check for the upper bound check. Lastly, we do this conditionally
    // on the previous check succeeding.
    // Then we conditionally load the pc offset by the (reversed) index (times
-    // the address size) into the pc, which branches to the correct case.
-    // NOTE: when we go to read the pc, the value that we get back is the pc of
-    // the current instruction *PLUS 8*. This means that ldr foo, [pc, +0]
-    // reads $pc+8. In other words, there is an empty word after the branch into
-    // the switch table before the table actually starts. Since the only other
-    // unhandled case is the default case (both out of range high and out of range low)
-    // I then insert a branch to default case into the extra slot, which ensures
-    // we don't attempt to execute the address table.
+    // the address size) into the pc, which branches to the correct case. NOTE:
+    // when we go to read the pc, the value that we get back is the pc of the
+    // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
+    // $pc+8. In other words, there is an empty word after the branch into the
+    // switch table before the table actually starts. Since the only other
+    // unhandled case is the default case (both out of range high and out of
+    // range low), I then insert a branch to the default case into the extra
+    // slot, which ensures we don't attempt to execute the address table.
    Label *defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
    int32_t cases = mir->numCases();
-    // Lower value with low value
+    // Lower value with low value.
    masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
    masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
    AutoForbidPools afp(&masm);
    masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
    masm.ma_b(defaultcase);

-    // To fill in the CodeLabels for the case entries, we need to first
-    // generate the case entries (we don't yet know their offsets in the
-    // instruction stream).
+    // To fill in the CodeLabels for the case entries, we need to first generate
+    // the case entries (we don't yet know their offsets in the instruction
+    // stream).
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
    for (int32_t i = 0; i < cases; i++) {
        CodeLabel cl;
@@ -1226,8 +1240,8 @@ CodeGeneratorARM::visitRound(LRound *lir)
    Register output = ToRegister(lir->output());
    FloatRegister tmp = ToFloatRegister(lir->temp());
    Label bail;
-    // Output is either correct, or clamped. All -0 cases have been translated to a clamped
-    // case.a
+    // Output is either correct, or clamped. All -0 cases have been translated
+    // to a clamped case.
    masm.round(input, output, &bail, tmp);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;
@@ -1241,8 +1255,8 @@ CodeGeneratorARM::visitRoundF(LRoundF *lir)
    Register output = ToRegister(lir->output());
    FloatRegister tmp = ToFloatRegister(lir->temp());
    Label bail;
-    // Output is either correct, or clamped. All -0 cases have been translated to a clamped
-    // case.a
+    // Output is either correct, or clamped. All -0 cases have been translated
+    // to a clamped case.
    masm.roundf(input, output, &bail, tmp);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;
@@ -1339,9 +1353,9 @@ CodeGeneratorARM::visitBox(LBox *box)

    JS_ASSERT(!box->getOperand(0)->isConstant());

-    // On x86, the input operand and the output payload have the same
-    // virtual register. All that needs to be written is the type tag for
-    // the type definition.
+    // On x86, the input operand and the output payload have the same virtual
+    // register.
+    // All that needs to be written is the type tag for the type definition.
    masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
    return true;
}
@@ -1414,11 +1428,10 @@ CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();

-    // If the compare set the 0 bit, then the result
-    // is definately false.
+    // If the compare set the 0 bit, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
-    // it is also false if one of the operands is NAN, which is
-    // shown as Overflow.
+    // It is also false if one of the operands is NAN, which is shown as
+    // Overflow.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
@@ -1433,11 +1446,10 @@ CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();

-    // If the compare set the 0 bit, then the result
-    // is definately false.
+    // If the compare set the 0 bit, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
-    // it is also false if one of the operands is NAN, which is
-    // shown as Overflow.
+    // It is also false if one of the operands is NAN, which is shown as
+    // Overflow.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
@@ -1629,22 +1641,22 @@ CodeGeneratorARM::visitNotI(LNotI *ins)
bool
CodeGeneratorARM::visitNotD(LNotD *ins)
{
-    // Since this operation is not, we want to set a bit if
-    // the double is falsey, which means 0.0, -0.0 or NaN.
-    // when comparing with 0, an input of 0 will set the Z bit (30)
-    // and NaN will set the V bit (28) of the APSR.
+    // Since this operation is a logical not, we want to set a bit if the
+    // double is falsey, which means 0.0, -0.0 or NaN. When comparing with 0,
+    // an input of 0 will set the Z bit (30) and NaN will set the V bit (28)
+    // of the APSR.
    FloatRegister opd = ToFloatRegister(ins->input());
    Register dest = ToRegister(ins->output());

-    // Do the compare
+    // Do the compare.
    masm.ma_vcmpz(opd);
    // TODO There are three variations here to compare performance-wise.
    bool nocond = true;
    if (nocond) {
-        // Load the value into the dest register
+        // Load the value into the dest register.
        masm.as_vmrs(dest);
        masm.ma_lsr(Imm32(28), dest, dest);
-        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        // 28 + 2 = 30
+        masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
        masm.ma_and(Imm32(1), dest);
    } else {
        masm.as_vmrs(pc);
@@ -1658,22 +1670,22 @@
bool
CodeGeneratorARM::visitNotF(LNotF *ins)
{
-    // Since this operation is not, we want to set a bit if
-    // the double is falsey, which means 0.0, -0.0 or NaN.
-    // when comparing with 0, an input of 0 will set the Z bit (30)
-    // and NaN will set the V bit (28) of the APSR.
+    // Since this operation is a logical not, we want to set a bit if the
+    // double is falsey, which means 0.0, -0.0 or NaN. When comparing with 0,
+    // an input of 0 will set the Z bit (30) and NaN will set the V bit (28)
+    // of the APSR.
    FloatRegister opd = ToFloatRegister(ins->input());
    Register dest = ToRegister(ins->output());

-    // Do the compare
+    // Do the compare.
    masm.ma_vcmpz_f32(opd);
    // TODO There are three variations here to compare performance-wise.
    bool nocond = true;
    if (nocond) {
-        // Load the value into the dest register
+        // Load the value into the dest register.
        masm.as_vmrs(dest);
        masm.ma_lsr(Imm32(28), dest, dest);
-        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        // 28 + 2 = 30
+        masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
        masm.ma_and(Imm32(1), dest);
    } else {
        masm.as_vmrs(pc);
@@ -1726,15 +1738,14 @@ CodeGeneratorARM::visitGuardClass(LGuardClass *guard)
bool
CodeGeneratorARM::generateInvalidateEpilogue()
{
-    // Ensure that there is enough space in the buffer for the OsiPoint
-    // patching to occur. Otherwise, we could overwrite the invalidation
-    // epilogue.
-    for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
+    // Ensure that there is enough space in the buffer for the OsiPoint patching
+    // to occur. Otherwise, we could overwrite the invalidation epilogue.
+    for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize())
        masm.nop();

    masm.bind(&invalidate_);

-    // Push the return address of the point that we bailed out at onto the stack
+    // Push the return address of the point that we bailed out at onto the stack.
    masm.Push(lr);

    // Push the Ion script onto the stack (when we determine what that pointer is).
@@ -1743,8 +1754,8 @@

    masm.branch(thunk);

-    // We should never reach this point in JIT code -- the invalidation thunk should
-    // pop the invalidated JS frame and return directly to its caller.
+    // We should never reach this point in JIT code -- the invalidation thunk
+    // should pop the invalidated JS frame and return directly to its caller.
    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    return true;
}
diff --git a/js/src/jit/arm/CodeGenerator-arm.h b/js/src/jit/arm/CodeGenerator-arm.h
index d55d71befd1f..9a3dedfff65e 100644
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -26,9 +26,9 @@ class CodeGeneratorARM : public CodeGeneratorShared
    // Label for the common return path.
    NonAssertingLabel returnLabel_;
    NonAssertingLabel deoptLabel_;
-    // ugh. this is not going to be pretty to move over.
-    // stack slotted variables are not useful on arm.
-    // it looks like this will need to return one of two types.
+    // Ugh. This is not going to be pretty to move over. Stack slotted variables
+    // are not useful on arm. It looks like this will need to return one of two
+    // types.
    inline Operand ToOperand(const LAllocation &a) {
        if (a.isGeneralReg())
            return Operand(a.toGeneralReg()->reg());
diff --git a/js/src/jit/arm/LIR-arm.h b/js/src/jit/arm/LIR-arm.h
index afa5781974ee..9a95bc24033f 100644
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -144,10 +144,10 @@ class LDivI : public LBinaryMath<1>
// takes two arguments (dividend in r0, divisor in r1). The LInstruction gets
// encoded such that the divisor and dividend are passed in their appropriate
// registers and end their life at the start of the instruction by the use of
-// useFixedAtStart. The result is returned in r0 and the other three registers
-// that can be trashed are marked as temps. For the time being, the link
+// useFixedAtStart. The result is returned in r0 and the other three registers
+// that can be trashed are marked as temps. For the time being, the link
// register is not marked as trashed because we never allocate to the link
-// register. The FP registers are not trashed.
+// register. The FP registers are not trashed.
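(For context on the convention described above, a minimal C-level sketch follows. The helper name __aeabi_idiv is an assumption drawn from the ARM EABI runtime; the patch itself does not name the callee.)

    // Hypothetical illustration of the software-divide call: the dividend
    // arrives in r0, the divisor in r1, and the quotient comes back in r0,
    // with r0-r3 treated as clobbered per the AAPCS.
    extern "C" int __aeabi_idiv(int numerator, int denominator);

    int softDiv(int lhs, int rhs)
    {
        return __aeabi_idiv(lhs, rhs);
    }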
class LSoftDivI : public LBinaryMath<3>
{
  public:
@@ -304,7 +304,7 @@ class LPowHalfD : public LInstructionHelper<1, 1, 0>
    }
};

-// Takes a tableswitch with an integer to decide
+// Takes a tableswitch with an integer to decide.
class LTableSwitch : public LInstructionHelper<0, 1, 1>
{
  public:
@@ -332,7 +332,7 @@ class LTableSwitch : public LInstructionHelper<0, 1, 1>
    }
};

-// Takes a tableswitch with an integer to decide
+// Takes a tableswitch with an integer to decide.
class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
{
  public:
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
index c9148fee00c6..08d3a6528250 100644
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -26,8 +26,8 @@ bool
isValueDTRDCandidate(ValueOperand &val)
{
    // In order to be used for a DTRD memory function, the two target registers
-    // need to be a) Adjacent, with the tag larger than the payload, and
-    // b) Aligned to a multiple of two.
+    // need to be a) Adjacent, with the tag larger than the payload, and b)
+    // Aligned to a multiple of two.
    if ((val.typeReg().code() != (val.payloadReg().code() + 1)))
        return false;
    if ((val.payloadReg().code() & 1) != 0)
        return false;
    return true;
}

void
MacroAssemblerARM::convertBoolToInt32(Register source, Register dest)
@@ -46,7 +46,7 @@
void
MacroAssemblerARM::convertInt32ToDouble(Register src, FloatRegister dest_)
{
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.sintOverlay(),
             CoreToFloat);
@@ -63,7 +63,7 @@ MacroAssemblerARM::convertInt32ToDouble(const Address &src, FloatRegister dest)
void
MacroAssemblerARM::convertUInt32ToDouble(Register src, FloatRegister dest_)
{
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
    as_vcvt(dest, dest.uintOverlay());
@@ -72,7 +72,7 @@
void
MacroAssemblerARM::convertUInt32ToFloat32(Register src, FloatRegister dest_)
{
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
    as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
@@ -84,13 +84,14 @@ void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src, FloatRegister
    as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
}

-// there are two options for implementing emitTruncateDouble.
-// 1) convert the floating point value to an integer, if it did not fit,
-//      then it was clamped to INT_MIN/INT_MAX, and we can test it.
-//      NOTE: if the value really was supposed to be INT_MAX / INT_MIN
-//      then it will be wrong.
-// 2) convert the floating point value to an integer, if it did not fit,
-//      then it set one or two bits in the fpcsr. Check those.
+// There are two options for implementing emitTruncateDouble:
+//
+// 1. Convert the floating point value to an integer; if it did not fit, it was
+//    clamped to INT_MIN/INT_MAX, and we can test it. NOTE: if the value really
+//    was supposed to be INT_MAX / INT_MIN then it will be wrong.
+//
+// 2. Convert the floating point value to an integer; if it did not fit, it set
+//    one or two bits in the fpcsr. Check those.
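(A standalone sketch of option 1, for illustration only: the real code performs the conversion with vcvt on VFP registers, and the saturation below stands in for the hardware's clamping behavior.)

    #include <stdint.h>

    // Convert with saturation, then reject results that landed on a clamp
    // value. Note the false negative the comment above warns about: an input
    // of exactly INT32_MIN or INT32_MAX is also rejected.
    static bool truncateDoubleSketch(double d, int32_t *out)
    {
        int32_t i;
        if (d >= 2147483647.0)
            i = INT32_MAX;
        else if (d <= -2147483648.0)
            i = INT32_MIN;
        else
            i = (int32_t)d;     // In range: truncates toward zero, like vcvt.
        if (i == INT32_MAX || i == INT32_MIN)
            return false;
        *out = i;
        return true;
    }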
void MacroAssemblerARM::branchTruncateDouble(FloatRegister src, Register dest, Label *fail) { @@ -109,12 +110,12 @@ void MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest, Label *fail, bool negativeZeroCheck) { - // convert the floating point value to an integer, if it did not fit, - // then when we convert it *back* to a float, it will have a - // different value, which we can test. + // Convert the floating point value to an integer; if it did not fit, then + // when we convert it *back* to a float, it will have a different value, + // which we can test. FloatRegister ScratchSIntReg = ScratchDoubleReg.sintOverlay(); ma_vcvt_F64_I32(src, ScratchSIntReg); - // move the value into the dest register. + // Move the value into the dest register. ma_vxfer(ScratchSIntReg, dest); ma_vcvt_I32_F64(ScratchSIntReg, ScratchDoubleReg); ma_vcmp(src, ScratchDoubleReg); @@ -123,9 +124,9 @@ MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest, if (negativeZeroCheck) { ma_cmp(dest, Imm32(0)); - // Test and bail for -0.0, when integer result is 0 - // Move the top word of the double into the output reg, if it is non-zero, - // then the original value was -0.0 + // Test and bail for -0.0, when integer result is 0. Move the top word + // of the double into the output reg; if it is non-zero, then the + // original value was -0.0. as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1); ma_cmp(dest, Imm32(0x80000000), Assembler::Equal); ma_b(fail, Assembler::Equal); @@ -139,11 +140,11 @@ void MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest, Label *fail, bool negativeZeroCheck) { - // convert the floating point value to an integer, if it did not fit, - // then when we convert it *back* to a float, it will have a - // different value, which we can test. + // Convert the floating point value to an integer; if it did not fit, then + // when we convert it *back* to a float, it will have a different value, + // which we can test. ma_vcvt_F32_I32(src, ScratchFloat32Reg.sintOverlay()); - // move the value into the dest register. + // Move the value into the dest register. ma_vxfer(ScratchFloat32Reg, dest); ma_vcvt_I32_F32(ScratchFloat32Reg.sintOverlay(), ScratchFloat32Reg); ma_vcmp_f32(src, ScratchFloat32Reg); @@ -152,9 +153,9 @@ MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest, if (negativeZeroCheck) { ma_cmp(dest, Imm32(0)); - // Test and bail for -0.0, when integer result is 0 - // Move the float into the output reg, and if it is non-zero then - // the original value was -0.0 + // Test and bail for -0.0, when integer result is 0. Move the float into + // the output reg; if it is non-zero, then the original value was + // -0.0. as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0); ma_cmp(dest, Imm32(0x80000000), Assembler::Equal); ma_b(fail, Assembler::Equal); @@ -177,7 +178,7 @@ MacroAssemblerARM::branchTruncateFloat32(FloatRegister src, Register dest, Label void MacroAssemblerARM::convertInt32ToFloat32(Register src, FloatRegister dest) { - // direct conversions aren't possible. + // Direct conversions aren't possible. as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat); as_vcvt(dest.singleOverlay(), dest.sintOverlay()); @@ -245,16 +246,14 @@ MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op, if ((sc == SetCond && !
condsAreSafe(op)) || !can_dbl(op)) return false; ALUOp interop = getDestVariant(op); - Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value); + Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value); if (both.fst.invalid) return false; - // for the most part, there is no good reason to set the condition - // codes for the first instruction. - // we can do better things if the second instruction doesn't - // have a dest, such as check for overflow by doing first operation - // don't do second operation if first operation overflowed. - // this preserves the overflow condition code. - // unfortunately, it is horribly brittle. + // For the most part, there is no good reason to set the condition codes for + // the first instruction. We can do better things if the second instruction + // doesn't have a dest, such as checking for overflow by doing the first + // operation and not doing the second operation if the first one overflowed. + // This preserves the overflow condition code. Unfortunately, it is horribly + // brittle. as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c); as_alu(dest, ScratchRegister, both.snd, op, sc, c); return true; @@ -266,48 +265,44 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest, ALUOp op, SetCond_ sc, Condition c) { - // As it turns out, if you ask for a compare-like instruction - // you *probably* want it to set condition codes. + // As it turns out, if you ask for a compare-like instruction you *probably* + // want it to set condition codes. if (dest == InvalidReg) JS_ASSERT(sc == SetCond); - // The operator gives us the ability to determine how - // this can be used. + // The operator gives us the ability to determine how this can be used. Imm8 imm8 = Imm8(imm.value); - // ONE INSTRUCTION: - // If we can encode it using an imm8m, then do so. + // One instruction: If we can encode it using an imm8m, then do so. if (!imm8.invalid) { as_alu(dest, src1, imm8, op, sc, c); return; } - // ONE INSTRUCTION, NEGATED: + // One instruction, negated: Imm32 negImm = imm; Register negDest; ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest); Imm8 negImm8 = Imm8(negImm.value); - // add r1, r2, -15 can be replaced with - // sub r1, r2, 15 - // for bonus points, dest can be replaced (nearly always invalid => ScratchRegister) - // This is useful if we wish to negate tst. tst has an invalid (aka not used) dest, - // but its negation is bic *requires* a dest. We can accomodate, but it will need to clobber - // *something*, and the scratch register isn't being used, so... - if (negOp != op_invalid && !negImm8.invalid) { + // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'. For bonus + // points, dest can be replaced (nearly always invalid => ScratchRegister). + // This is useful if we wish to negate tst. tst has an invalid (aka not + // used) dest, but its negation, bic, *requires* a dest. We can accommodate, + // but it will need to clobber *something*, and the scratch register isn't + // being used, so... + if (negOp != OpInvalid && !negImm8.invalid) { as_alu(negDest, src1, negImm8, negOp, sc, c); return; } if (HasMOVWT()) { - // If the operation is a move-a-like then we can try to use movw to - // move the bits into the destination. Otherwise, we'll need to - // fall back on a multi-instruction format :( - // movw/movt don't set condition codes, so don't hold your breath. - if (sc == NoSetCond && (op == op_mov || op == op_mvn)) { - // ARMv7 supports movw/movt. movw zero-extends - // its 16 bit argument, so we can set the register - // this way.
- // movt leaves the bottom 16 bits in tact, so - it is unsuitable to move a constant that - if (op == op_mov && ((imm.value & ~ 0xffff) == 0)) { + // If the operation is a move-a-like then we can try to use movw to move + the bits into the destination. Otherwise, we'll need to fall back on + a multi-instruction format :( + // movw/movt does not set condition codes, so don't hold your breath. + if (sc == NoSetCond && (op == OpMov || op == OpMvn)) { + // ARMv7 supports movw/movt. movw zero-extends its 16 bit argument, + so we can set the register this way. movt leaves the bottom 16 + bits intact, so it is unsuitable for moving a constant on its own. + if (op == OpMov && ((imm.value & ~ 0xffff) == 0)) { JS_ASSERT(src1 == InvalidReg); as_movw(dest, (uint16_t)imm.value, c); return; @@ -315,76 +310,72 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest, // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits // then do it. - if (op == op_mvn && (((~imm.value) & ~ 0xffff) == 0)) { + if (op == OpMvn && (((~imm.value) & ~ 0xffff) == 0)) { JS_ASSERT(src1 == InvalidReg); as_movw(dest, (uint16_t)~imm.value, c); return; } - // TODO: constant dedup may enable us to add dest, r0, 23 *if* - // we are attempting to load a constant that looks similar to one - // that already exists - // If it can't be done with a single movw - // then we *need* to use two instructions - // since this must be some sort of a move operation, we can just use - // a movw/movt pair and get the whole thing done in two moves. This - // does not work for ops like add, sinc we'd need to do - // movw tmp; movt tmp; add dest, tmp, src1 - if (op == op_mvn) + // TODO: constant dedup may enable us to add dest, r0, 23 *if* we + // are attempting to load a constant that looks similar to one that + // already exists. If it can't be done with a single movw then we + // *need* to use two instructions. Since this must be some sort of a + // move operation, we can just use a movw/movt pair and get the + // whole thing done in two moves. This does not work for ops like + // add, since we'd need to do: movw tmp; movt tmp; add dest, tmp, + // src1. + if (op == OpMvn) imm.value = ~imm.value; as_movw(dest, imm.value & 0xffff, c); as_movt(dest, (imm.value >> 16) & 0xffff, c); return; } - // If we weren't doing a movalike, a 16 bit immediate - // will require 2 instructions. With the same amount of - // space and (less)time, we can do two 8 bit operations, reusing - // the dest register. e.g. - // movw tmp, 0xffff; add dest, src, tmp ror 4 // vs. - // add dest, src, 0xff0; add dest, dest, 0xf000000f - // it turns out that there are some immediates that we miss with the - // second approach. A sample value is: add dest, src, 0x1fffe - // this can be done by movw tmp, 0xffff; add dest, src, tmp lsl 1 - // since imm8m's only get even offsets, we cannot encode this. - // I'll try to encode as two imm8's first, since they are faster. - // Both operations should take 1 cycle, where as add dest, tmp ror 4 - // takes two cycles to execute. + // If we weren't doing a movalike, a 16 bit immediate will require 2 + // instructions. With the same amount of space and (less) time, we can do + // two 8 bit operations, reusing the dest register. e.g. + // movw tmp, 0xffff; add dest, src, tmp ror 4 // vs. + // add dest, src, 0xff0; add dest, dest, 0xf000000f + // + // It turns out that there are some immediates that we miss with the + // second approach.
A sample value is: add dest, src, 0x1fffe. This can + be done by movw tmp, 0xffff; add dest, src, tmp lsl 1. Since imm8m's + only get even offsets, we cannot encode this. I'll try to encode as + two imm8's first, since they are faster. Both operations should take + 1 cycle, whereas add dest, tmp ror 4 takes two cycles to execute. } - // Either a) this isn't ARMv7 b) this isn't a move - // start by attempting to generate a two instruction form. - // Some things cannot be made into two-inst forms correctly. - // namely, adds dest, src, 0xffff. - // Since we want the condition codes (and don't know which ones will - // be checked), we need to assume that the overflow flag will be checked - // and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not - // guaranteed to set the overflow flag the same as the (theoretical) - // one instruction variant. + // Either a) this isn't ARMv7 or b) this isn't a move. Start by attempting + // to generate a two instruction form. Some things cannot be made into two-inst + // forms correctly. Namely, adds dest, src, 0xffff. Since we want the + // condition codes (and don't know which ones will be checked), we need to + // assume that the overflow flag will be checked and add{,s} dest, src, + // 0xff00; add{,s} dest, dest, 0xff is not guaranteed to set the overflow + // flag the same as the (theoretical) one instruction variant. if (alu_dbl(src1, imm, dest, op, sc, c)) return; // And try with its negative. - if (negOp != op_invalid && + if (negOp != OpInvalid && alu_dbl(src1, negImm, negDest, negOp, sc, c)) return; - // Well, damn. We can use two 16 bit mov's, then do the op - // or we can do a single load from a pool then op. + // Well, damn. We can use two 16 bit mov's, then do the op or we can do a + // single load from a pool then op. if (HasMOVWT()) { - // Try to load the immediate into a scratch register - // then use that + // Try to load the immediate into a scratch register, then use that. as_movw(ScratchRegister, imm.value & 0xffff, c); if ((imm.value >> 16) != 0) as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c); } else { - // Going to have to use a load. If the operation is a move, then just move it into the - // destination register - if (op == op_mov) { + // Going to have to use a load. If the operation is a move, then just + // move it into the destination register. + if (op == OpMov) { as_Imm32Pool(dest, imm.value, c); return; } else { - // If this isn't just going into a register, then stick it in a temp, and then proceed. + // If this isn't just going into a register, then stick it in a + // temp, and then proceed. as_Imm32Pool(ScratchRegister, imm.value, c); } } @@ -425,16 +416,16 @@ MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Conditi { int32_t imm = imm_.value; if (i) { - // Make sure the current instruction is not an artificial guard - // inserted by the assembler buffer. + // Make sure the current instruction is not an artificial guard inserted + // by the assembler buffer. i = i->skipPool(); } switch(rs) { case L_MOVWT: as_movw(dest, Imm16(imm & 0xffff), c, i); - // i can be nullptr here. that just means "insert in the next in sequence." - // NextInst is special cased to not do anything when it is passed nullptr, so - // two consecutive instructions will be inserted. + // 'i' can be nullptr here. That just means "insert in the next in + // sequence." NextInst is special cased to not do anything when it is + // passed nullptr, so two consecutive instructions will be inserted.
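For reference, here is a small C++ sketch of the imm8m rule that the discussion above leans on (an 8-bit value rotated right by an even amount; isImm8m is our illustrative name, not SpiderMonkey's Imm8 class). It shows why 0x1fffe is rejected outright while 0xffff ror 4 splits cleanly into two encodable imm8m halves.

```cpp
#include <cstdint>
#include <cassert>

// An ARM data-processing immediate is an 8-bit value rotated right by an
// even amount. Rotating the candidate left by each even amount undoes a
// possible encoding; if any result fits in 8 bits, the value is encodable.
static bool isImm8m(uint32_t v) {
    for (unsigned rot = 0; rot < 32; rot += 2) {
        uint32_t unrotated = (v << rot) | (rot ? v >> (32 - rot) : 0);
        if (unrotated <= 0xff)
            return true;
    }
    return false;
}

int main() {
    assert(!isImm8m(0x1fffe));                      // 16 set bits never fit in 8
    assert(isImm8m(0xff0) && isImm8m(0xf000000f));  // the two-imm8 split...
    assert(0xff0u + 0xf000000fu == 0xf0000fffu);    // ...recomposes 0xffff ror 4
}
```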
i = NextInst(i); as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i); break; @@ -466,14 +457,14 @@ void MacroAssemblerARM::ma_mov(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(InvalidReg, imm, dest, op_mov, sc, c); + ma_alu(InvalidReg, imm, dest, OpMov, sc, c); } void MacroAssemblerARM::ma_mov(ImmWord imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c); + ma_alu(InvalidReg, Imm32(imm.value), dest, OpMov, sc, c); } void @@ -491,7 +482,7 @@ MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest) ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always, rs); } - // Shifts (just a move with a shifting op2) +// Shifts (just a move with a shifting op2) void MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst) { @@ -517,7 +508,8 @@ MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst) { as_mov(dst, rol(src, shift.value)); } - // Shifts (just a move with a shifting op2) + +// Shifts (just a move with a shifting op2) void MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst) { @@ -545,18 +537,17 @@ MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst) as_mov(dst, ror(src, ScratchRegister)); } - // Move not (dest <- ~src) - +// Move not (dest <- ~src) void MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(InvalidReg, imm, dest, op_mvn, sc, c); + ma_alu(InvalidReg, imm, dest, OpMvn, sc, c); } void MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c) { - as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c); + as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, sc, c); } // Negate (dest <- -src), src is a register, rather than a general op2. @@ -581,21 +572,20 @@ MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest, void MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(dest, imm, dest, op_and, sc, c); + ma_alu(dest, imm, dest, OpAnd, sc, c); } void MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(src1, imm, dest, op_and, sc, c); + ma_alu(src1, imm, dest, OpAnd, sc, c); } - // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2). void MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(dest, imm, dest, op_bic, sc, c); + ma_alu(dest, imm, dest, OpBic, sc, c); } // Exclusive or. @@ -613,13 +603,13 @@ MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest, void MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(dest, imm, dest, op_eor, sc, c); + ma_alu(dest, imm, dest, OpEor, sc, c); } void MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(src1, imm, dest, op_eor, sc, c); + ma_alu(src1, imm, dest, OpEor, sc, c); } // Or. @@ -637,13 +627,13 @@ MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest, void MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(dest, imm, dest, op_orr, sc, c); + ma_alu(dest, imm, dest, OpOrr, sc, c); } void MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest, SetCond_ sc, Assembler::Condition c) { - ma_alu(src1, imm, dest, op_orr, sc, c); + ma_alu(src1, imm, dest, OpOrr, sc, c); } // Arithmetic-based ops. 
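The shift helpers above are plain moves with a shifting op2, and the register form of ma_rol leans on an identity worth spelling out: ARM's barrel shifter only rotates right, so a rotate-left by n is emitted as a rotate-right by 32 - n (the amount computed into ScratchRegister). A host-side sketch with our own helper names:

```cpp
#include <cstdint>
#include <cassert>

// rotr mirrors the hardware ror operand; rotl is derived from it exactly the
// way ma_rol derives its rotate amount.
static uint32_t rotr(uint32_t v, uint32_t n) {
    n &= 31;
    return n ? (v >> n) | (v << (32 - n)) : v;
}
static uint32_t rotl(uint32_t v, uint32_t n) {
    return rotr(v, (32 - (n & 31)) & 31);   // rotate-left via rotate-right
}

int main() {
    assert(rotl(0x80000001u, 1) == 0x00000003u);  // top bit wraps to bit 0
    assert(rotr(0x00000003u, 1) == 0x80000001u);  // and back again
}
```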
@@ -651,128 +641,128 @@ MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest, void MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, imm, dest, op_adc, sc, c); + ma_alu(dest, imm, dest, OpAdc, sc, c); } void MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, dest, O2Reg(src), op_adc, sc, c); + as_alu(dest, dest, O2Reg(src), OpAdc, sc, c); } void MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, src1, O2Reg(src2), op_adc, sc, c); + as_alu(dest, src1, O2Reg(src2), OpAdc, sc, c); } // Add. void MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, imm, dest, op_add, sc, c); + ma_alu(dest, imm, dest, OpAdd, sc, c); } void MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, O2Reg(src1), dest, op_add, sc, c); + ma_alu(dest, O2Reg(src1), dest, OpAdd, sc, c); } void MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, src1, O2Reg(src2), op_add, sc, c); + as_alu(dest, src1, O2Reg(src2), OpAdd, sc, c); } void MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c) { - ma_alu(src1, op, dest, op_add, sc, c); + ma_alu(src1, op, dest, OpAdd, sc, c); } void MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c) { - ma_alu(src1, op, dest, op_add, sc, c); + ma_alu(src1, op, dest, OpAdd, sc, c); } // Subtract with carry. void MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, imm, dest, op_sbc, sc, c); + ma_alu(dest, imm, dest, OpSbc, sc, c); } void MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, dest, O2Reg(src1), op_sbc, sc, c); + as_alu(dest, dest, O2Reg(src1), OpSbc, sc, c); } void MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c); + as_alu(dest, src1, O2Reg(src2), OpSbc, sc, c); } // Subtract. void MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, imm, dest, op_sub, sc, c); + ma_alu(dest, imm, dest, OpSub, sc, c); } void MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, Operand(src1), dest, op_sub, sc, c); + ma_alu(dest, Operand(src1), dest, OpSub, sc, c); } void MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) { - ma_alu(src1, Operand(src2), dest, op_sub, sc, c); + ma_alu(src1, Operand(src2), dest, OpSub, sc, c); } void MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c) { - ma_alu(src1, op, dest, op_sub, sc, c); + ma_alu(src1, op, dest, OpSub, sc, c); } void MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c) { - ma_alu(src1, op, dest, op_sub, sc, c); + ma_alu(src1, op, dest, OpSub, sc, c); } // Severse subtract. 
void MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, imm, dest, op_rsb, sc, c); + ma_alu(dest, imm, dest, OpRsb, sc, c); } void MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, dest, O2Reg(src1), op_add, sc, c); + as_alu(dest, dest, O2Reg(src1), OpAdd, sc, c); } void MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, src1, O2Reg(src2), op_rsb, sc, c); + as_alu(dest, src1, O2Reg(src2), OpRsb, sc, c); } void MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c) { - ma_alu(src1, op2, dest, op_rsb, sc, c); + ma_alu(src1, op2, dest, OpRsb, sc, c); } // Reverse subtract with carry. void MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c) { - ma_alu(dest, imm, dest, op_rsc, sc, c); + ma_alu(dest, imm, dest, OpRsc, sc, c); } void MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, dest, O2Reg(src1), op_rsc, sc, c); + as_alu(dest, dest, O2Reg(src1), OpRsc, sc, c); } void MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) { - as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c); + as_alu(dest, src1, O2Reg(src2), OpRsc, sc, c); } // Compares/tests. @@ -780,12 +770,12 @@ MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ void MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c) { - ma_alu(src1, imm, InvalidReg, op_cmn, SetCond, c); + ma_alu(src1, imm, InvalidReg, OpCmn, SetCond, c); } void MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c) { - as_alu(InvalidReg, src2, O2Reg(src1), op_cmn, SetCond, c); + as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCond, c); } void MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c) @@ -797,7 +787,7 @@ MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c) void MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c) { - ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c); + ma_alu(src1, imm, InvalidReg, OpCmp, SetCond, c); } void @@ -833,11 +823,11 @@ MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c) as_cmp(src1, O2Reg(src2), c); } -// Test for equality, (src1^src2). +// Test for equality, (src1 ^ src2). void MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c) { - ma_alu(src1, imm, InvalidReg, op_teq, SetCond, c); + ma_alu(src1, imm, InvalidReg, OpTeq, SetCond, c); } void MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c) @@ -855,7 +845,7 @@ MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c) void MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c) { - ma_alu(src1, imm, InvalidReg, op_tst, SetCond, c); + ma_alu(src1, imm, InvalidReg, OpTst, SetCond, c); } void MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c) @@ -884,8 +874,8 @@ MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest) Assembler::Condition MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond) { - // TODO: this operation is illegal on armv6 and earlier if src2 == ScratchRegister - // or src2 == dest. + // TODO: this operation is illegal on armv6 and earlier if src2 == + // ScratchRegister or src2 == dest. 
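The Equal/NotEqual path of ma_check_mul that follows relies on the usual smull overflow rule. A host-side C++ formulation (ours, with illustrative names, assuming two's complement): the full 64-bit product fits an int32 iff its high word equals the sign extension of the low word.

```cpp
#include <cstdint>
#include <cassert>

// smull produces hi:lo = a * b as a 64-bit value; the product fits an int32
// exactly when hi == lo >> 31 (arithmetic shift, i.e. the sign extension).
static bool mulOverflows(int32_t a, int32_t b) {
    int64_t full = (int64_t)a * (int64_t)b;
    int32_t lo = (int32_t)full;                    // low word of the product
    int32_t hi = (int32_t)((uint64_t)full >> 32);  // high word of the product
    return hi != (lo >> 31);
}

int main() {
    assert(!mulOverflows(46340, 46340));   // 2147395600 fits in int32
    assert(mulOverflows(46341, 46341));    // 2147488281 does not
    assert(!mulOverflows(-46340, 46340));  // negative products check the same way
}
```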
if (cond == Equal || cond == NotEqual) { as_smull(ScratchRegister, dest, src1, src2, SetCond); return cond; } @@ -922,33 +912,36 @@ void MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp, int32_t shift) { - // MATH: // We wish to compute x % (1<<y) - 1 for a known constant, y. // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus. ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned); - // Get rid of the bits that we extracted before, and set the condition codes + // Get rid of the bits that we extracted before, and set the condition codes. as_mov(tmp, lsr(tmp, shift), SetCond); // If the shift produced zero, finish, otherwise, continue in the loop. ma_b(&head, NonZero); - // Check the hold to see if we need to negate the result. Hold can only be 1 or -1, - // so this will never set the 0 flag. + // Check the hold to see if we need to negate the result. Hold can only be + // 1 or -1, so this will never set the 0 flag. ma_cmp(hold, Imm32(0)); - // If the hold was non-zero, negate the result to be in line with what JS wants - // this will set the condition codes if we try to negate + // If the hold was non-zero, negate the result to be in line with what JS + // wants; this will set the condition codes if we try to negate. ma_rsb(Imm32(0), dest, SetCond, Signed); - // Since the Zero flag is not set by the compare, we can *only* set the Zero flag - // in the rsb, so Zero is set iff we negated zero (e.g. the result of the computation was -0.0). - + // Since the Zero flag is not set by the compare, we can *only* set the Zero + // flag in the rsb, so Zero is set iff we negated zero (e.g. the result of + // the computation was -0.0). } void @@ -996,7 +990,7 @@ MacroAssemblerARM::ma_umod(Register num, Register div, Register dest) as_mls(dest, num, ScratchRegister, div); } -// division +// Division void MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond) { @@ -1130,7 +1124,7 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, Index mode, Assembler::Condition cc) { int off = offset.value; - // we can encode this as a standard ldr... MAKE IT SO + // We can encode this as a standard ldr. if (size == 32 || (size == 8 && !IsSigned) ) { if (off < 4096 && off > -4096) { // This encodes as a single instruction, Emulating mode's behavior // We cannot encode this offset in a a single ldr. For mode == index, // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|. // This does not work for mode == PreIndex or mode == PostIndex. - // PreIndex is simple, just do the add into the base register first, then do - // a PreIndex'ed load. PostIndexed loads can be tricky. Normally, doing the load with - // an index of 0, then doing an add would work, but if the destination is the PC, - // you don't get to execute the instruction after the branch, which will lead to - // the base register not being updated correctly. Explicitly handle this case, without + // PreIndex is simple, just do the add into the base register first, + // then do a PreIndex'ed load. PostIndexed loads can be tricky. + // Normally, doing the load with an index of 0, then doing an add would + // work, but if the destination is the PC, you don't get to execute the + // instruction after the branch, which will lead to the base register + // not being updated correctly. Explicitly handle this case, without // doing anything fancy, then handle all of the other cases.
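Backing up to the ma_mod_mask hunk above, whose explanatory comment is partly lost here: the loop is the classic digit-sum modulus. For C = (1 << shift) - 1 and b = C + 1 we have b % C == 1, so x % C equals the sum of x's base-b digits modulo C. A host-side sketch under that reading (names ours):

```cpp
#include <cstdint>
#include <cassert>

// Sum the base-(1 << shift) digits, reducing by C whenever the running sum
// reaches C (the "trial subtraction" in the assembly), and fix the sign at
// the end the way hold does.
static int32_t modMask(int32_t x, int32_t shift) {
    uint32_t mask = (1u << shift) - 1;               // C
    int32_t hold = x < 0 ? -1 : 1;                   // remember the sign
    uint32_t tmp = x < 0 ? 0u - (uint32_t)x : (uint32_t)x;
    uint32_t sum = 0;
    while (tmp != 0) {
        sum += tmp & mask;                           // add the next digit
        if (sum >= mask)                             // sum < 2C here, so one
            sum -= mask;                             // subtract suffices
        tmp >>= shift;
    }
    return hold < 0 ? -(int32_t)sum : (int32_t)sum;
}

int main() {
    assert(modMask(7, 2) == 7 % 3);
    assert(modMask(-7, 2) == -7 % 3);                // JS-style signed result
    assert(modMask(0x12345678, 5) == 0x12345678 % 31);
}
```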
// mode == Offset @@ -1162,9 +1157,9 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, // add base, base, offset_lo // mov dest, scratch // PostIndex with the pc as the destination needs to be handled - // specially, since in the code below, the write into 'dest' - // is going to alter the control flow, so the following instruction would - // never get emitted. + // specially, since in the code below, the write into 'dest' is going to + // alter the control flow, so the following instruction would never get + // emitted. // // mode == PostIndex, dest != pc // ldr dest, [base], offset_lo @@ -1172,92 +1167,116 @@ MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, if (rt == pc && mode == PostIndex && ls == IsLoad) { ma_mov(rn, ScratchRegister); - ma_alu(rn, offset, rn, op_add); + ma_alu(rn, offset, rn, OpAdd); return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc); } int bottom = off & 0xfff; int neg_bottom = 0x1000 - bottom; - // For a regular offset, base == ScratchRegister does what we want. Modify the - // scratch register, leaving the actual base unscathed. + // For a regular offset, base == ScratchRegister does what we want. + // Modify the scratch register, leaving the actual base unscathed. Register base = ScratchRegister; - // For the preindex case, we want to just re-use rn as the base register, so when - // the base register is updated *before* the load, rn is updated. + // For the preindex case, we want to just re-use rn as the base + // register, so when the base register is updated *before* the load, rn + // is updated. if (mode == PreIndex) base = rn; JS_ASSERT(mode != PostIndex); - // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. + // At this point, both off - bottom and off + neg_bottom will be + // reasonable-ish quantities. // - // Note a neg_bottom of 0x1000 can not be encoded as an immediate negative offset in the - // instruction and this occurs when bottom is zero, so this case is guarded against below. + // Note a neg_bottom of 0x1000 cannot be encoded as an immediate + // negative offset in the instruction and this occurs when bottom is + // zero, so this case is guarded against below.
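The bottom/neg_bottom gymnastics that follow generalize across the three immediate widths used in this file (0xfff for ldr/str, 0xff for extdtr, and 0xff << 2 for vldr/vstr). A small C++ sketch of the decomposition identity; the imm8m encodability test on the add/sub half (Imm8::invalid in the real code) is a separate step not modeled here:

```cpp
#include <cstdint>
#include <cassert>

// fieldMask is the width of the load's immediate field. The low bits ride in
// that field; the remainder is added to the base up front.
static void splitOffset(int32_t off, int32_t fieldMask,
                        int32_t *addend, int32_t *imm) {
    int32_t bottom = off & fieldMask;
    *addend = off - bottom;   // candidate 1: add this, then encode +bottom
    *imm = bottom;
    // Candidate 2 is add (off + neg_bottom) with -neg_bottom encoded, where
    // neg_bottom = (fieldMask + 1) - bottom; it is only usable when
    // bottom != 0, since neg_bottom == fieldMask + 1 itself cannot be encoded.
}

int main() {
    int32_t addend, imm;
    splitOffset(0x12345, 0xfff, &addend, &imm);
    assert(addend == 0x12000 && imm == 0x345 && addend + imm == 0x12345);
    splitOffset(-0x12345, 0xfff, &addend, &imm);
    assert(addend + imm == -0x12345);   // the halves recompose for negatives too
}
```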
if (off < 0) { - Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off + // sub_off = bottom - off + Operand2 sub_off = Imm8(-(off - bottom)); if (!sub_off.invalid) { - as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom + // - sub_off = off - bottom + as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc); } - sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off + // sub_off = -neg_bottom - off + sub_off = Imm8(-(off + neg_bottom)); if (!sub_off.invalid && bottom != 0) { - JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0 - as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off + // Guarded against by: bottom != 0 + JS_ASSERT(neg_bottom < 0x1000); + // - sub_off = neg_bottom + off + as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc); } } else { - Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom + // sub_off = off - bottom + Operand2 sub_off = Imm8(off - bottom); if (!sub_off.invalid) { - as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom + // sub_off = off - bottom + as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc); } - sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off + // sub_off = neg_bottom + off + sub_off = Imm8(off + neg_bottom); if (!sub_off.invalid && bottom != 0) { - JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0 - as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off + // Guarded against by: bottom != 0 + JS_ASSERT(neg_bottom < 0x1000); + // sub_off = neg_bottom + off + as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc); } } ma_mov(offset, ScratchRegister); return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0))); } else { - // should attempt to use the extended load/store instructions + // Should attempt to use the extended load/store instructions. if (off < 256 && off > -256) return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc); - // We cannot encode this offset in a single extldr. Try to encode it as + // We cannot encode this offset in a single extldr. Try to encode it as // an add scratch, base, imm; extldr dest, [scratch, +offset]. int bottom = off & 0xff; int neg_bottom = 0x100 - bottom; - // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. + // At this point, both off - bottom and off + neg_bottom will be + // reasonable-ish quantities. // - // Note a neg_bottom of 0x100 can not be encoded as an immediate negative offset in the - // instruction and this occurs when bottom is zero, so this case is guarded against below. + // Note a neg_bottom of 0x100 cannot be encoded as an immediate + // negative offset in the instruction and this occurs when bottom is + // zero, so this case is guarded against below.
if (off < 0) { - Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off + // sub_off = bottom - off + Operand2 sub_off = Imm8(-(off - bottom)); if (!sub_off.invalid) { - as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom + // - sub_off = off - bottom + as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_extdtr(ls, size, IsSigned, Offset, rt, EDtrAddr(ScratchRegister, EDtrOffImm(bottom)), cc); } - sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off + // sub_off = -neg_bottom - off + sub_off = Imm8(-(off + neg_bottom)); if (!sub_off.invalid && bottom != 0) { - JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0 - as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off + // Guarded against by: bottom != 0 + JS_ASSERT(neg_bottom < 0x100); + // - sub_off = neg_bottom + off + as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_extdtr(ls, size, IsSigned, Offset, rt, EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)), cc); } } else { - Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom + // sub_off = off - bottom + Operand2 sub_off = Imm8(off - bottom); if (!sub_off.invalid) { - as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom + // sub_off = off - bottom + as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_extdtr(ls, size, IsSigned, Offset, rt, EDtrAddr(ScratchRegister, EDtrOffImm(bottom)), cc); } - sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off + // sub_off = neg_bottom + off + sub_off = Imm8(off + neg_bottom); if (!sub_off.invalid && bottom != 0) { - JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0 - as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off + // Guarded against by: bottom != 0 + JS_ASSERT(neg_bottom < 0x100); + // sub_off = neg_bottom + off + as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); return as_extdtr(ls, size, IsSigned, Offset, rt, EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)), cc); @@ -1322,16 +1341,15 @@ b_type() void MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Condition c) { - // we know the absolute address of the target, but not our final - // location (with relocating GC, we *can't* know our final location) - // for now, I'm going to be conservative, and load this with an - // absolute address + // We know the absolute address of the target, but not our final location + // (with relocating GC, we *can't* know our final location). For now, I'm + // going to be conservative and load this with an absolute address. uint32_t trg = (uint32_t)target; switch (b_type()) { case Assembler::B_MOVWT: as_movw(ScratchRegister, Imm16(trg & 0xffff), c); as_movt(ScratchRegister, Imm16(trg >> 16), c); - // this is going to get the branch predictor pissed off. + // This is going to get the branch predictor pissed off.
as_bx(ScratchRegister, c); break; case Assembler::B_LDR_BX: @@ -1482,7 +1500,7 @@ MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc) if (DoubleLowWord(value) == 0) { if (DoubleHighWord(value) == 0) { // To zero a register, load 1.0, then execute dN <- dN - dN - as_vimm(dest, VFPImm::one, cc); + as_vimm(dest, VFPImm::One, cc); as_vsub(dest, dest, dest, cc); return; } @@ -1511,18 +1529,19 @@ MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc) VFPRegister vd = VFPRegister(dest).singleOverlay(); if (HasVFPv3()) { if (Float32Word(value) == 0) { - // To zero a register, load 1.0, then execute sN <- sN - sN - as_vimm(vd, VFPImm::one, cc); + // To zero a register, load 1.0, then execute sN <- sN - sN. + as_vimm(vd, VFPImm::One, cc); as_vsub(vd, vd, vd, cc); return; } - // Note that the vimm immediate float32 instruction encoding differs from the - // vimm immediate double encoding, but this difference matches the difference - // in the floating point formats, so it is possible to convert the float32 to - // a double and then use the double encoding paths. It is still necessary to - // firstly check that the double low word is zero because some float32 - // numbers set these bits and this can not be ignored. + // Note that the vimm immediate float32 instruction encoding differs + // from the vimm immediate double encoding, but this difference matches + // the difference in the floating point formats, so it is possible to + // convert the float32 to a double and then use the double encoding + // paths. It is still necessary to firstly check that the double low + // word is zero because some float32 numbers set these bits and this + // cannot be ignored. double doubleValue = value; if (DoubleLowWord(value) == 0) { VFPImm enc(DoubleHighWord(doubleValue)); @@ -1634,36 +1653,48 @@ MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Co if (off > -1024 && off < 1024) return as_vdtr(ls, rt, addr.toVFPAddr(), cc); - // We cannot encode this offset in a a single ldr. Try to encode it as - // an add scratch, base, imm; ldr dest, [scratch, +offset]. + // We cannot encode this offset in a single ldr. Try to encode it as an + // add scratch, base, imm; ldr dest, [scratch, +offset]. int bottom = off & (0xff << 2); int neg_bottom = (0x100 << 2) - bottom; - // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. + // At this point, both off - bottom and off + neg_bottom will be + // reasonable-ish quantities. // - // Note a neg_bottom of 0x400 can not be encoded as an immediate negative offset in the - // instruction and this occurs when bottom is zero, so this case is guarded against below. + // Note a neg_bottom of 0x400 cannot be encoded as an immediate negative + // offset in the instruction and this occurs when bottom is zero, so this + // case is guarded against below.
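On the ma_vimm_f32 note above about float32 values setting low-word bits: widening float to double left-aligns the 23-bit mantissa in the 52-bit field, so the float's lowest three mantissa bits land inside the double's low 32 bits. A host-side demonstration (bitsOf is our helper, not jit code):

```cpp
#include <cfloat>
#include <cstdint>
#include <cstring>
#include <cassert>

// Reinterpret a double's bit pattern; the low 32 bits are what the code
// above calls DoubleLowWord.
static uint64_t bitsOf(double d) {
    uint64_t b;
    std::memcpy(&b, &d, sizeof b);
    return b;
}

int main() {
    // 1.0f widens with a zero low word: the double-immediate path is usable.
    assert((uint32_t)bitsOf((double)1.0f) == 0);
    // The next float after 1.0f has mantissa bit 0 set; widened, that bit
    // sits at double-mantissa bit 29, i.e. inside the low word.
    assert((uint32_t)bitsOf((double)(1.0f + FLT_EPSILON)) != 0);
}
```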
if (off < 0) { - Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off + // sub_off = bottom - off + Operand2 sub_off = Imm8(-(off - bottom)); if (!sub_off.invalid) { - as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom + // - sub_off = off - bottom + as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc); } - sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off + // sub_off = -neg_bottom - off + sub_off = Imm8(-(off + neg_bottom)); if (!sub_off.invalid && bottom != 0) { - JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0 - as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off + // Guarded against by: bottom != 0 + JS_ASSERT(neg_bottom < 0x400); + // - sub_off = neg_bottom + off + as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc); } } else { - Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom + // sub_off = off - bottom + Operand2 sub_off = Imm8(off - bottom); if (!sub_off.invalid) { - as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = off - bottom + // sub_off = off - bottom + as_add(ScratchRegister, base, sub_off, NoSetCond, cc); return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc); } - sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off + // sub_off = neg_bottom + off + sub_off = Imm8(off + neg_bottom); if (!sub_off.invalid && bottom != 0) { - JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0 - as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off + // Guarded against by: bottom != 0 + JS_ASSERT(neg_bottom < 0x400); + // sub_off = neg_bottom + off + as_add(ScratchRegister, base, sub_off, NoSetCond, cc); return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc); } } @@ -2255,8 +2286,8 @@ MacroAssemblerARMCompat::loadDouble(const Address &address, FloatRegister dest) void MacroAssemblerARMCompat::loadDouble(const BaseIndex &src, FloatRegister dest) { - // VFP instructions don't even support register Base + register Index modes, so - // just add the index, then handle the offset like normal + // VFP instructions don't even support register Base + register Index modes, + // so just add the index, then handle the offset like normal Register base = src.base; Register index = src.index; uint32_t scale = Imm32::ShiftOf(src.scale).value; @@ -2277,8 +2308,8 @@ MacroAssemblerARMCompat::loadFloatAsDouble(const Address &address, FloatRegister void MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex &src, FloatRegister dest) { - // VFP instructions don't even support register Base + register Index modes, so - // just add the index, then handle the offset like normal + // VFP instructions don't even support register Base + register Index modes, + // so just add the index, then handle the offset like normal Register base = src.base; Register index = src.index; uint32_t scale = Imm32::ShiftOf(src.scale).value; @@ -2299,8 +2330,8 @@ MacroAssemblerARMCompat::loadFloat32(const Address &address, FloatRegister dest) void MacroAssemblerARMCompat::loadFloat32(const BaseIndex &src, FloatRegister dest) { - // VFP instructions don't even support register Base + register Index modes, so - // just add the index, then handle the offset like normal + // VFP instructions don't even support register Base + register Index modes, + // so 
just add the index, then handle the offset like normal. Register base = src.base; Register index = src.index; uint32_t scale = Imm32::ShiftOf(src.scale).value; @@ -2469,16 +2500,16 @@ MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) // Convert the double into an unsigned fixed point value with 24 bits of // precision. The resulting number will look like 0xII.DDDDDD as_vcvtFixed(ScratchDoubleReg, false, 24, true); - // Move the fixed point value into an integer register + // Move the fixed point value into an integer register. as_vxfer(output, InvalidReg, ScratchFloat32Reg.uintOverlay(), FloatToCore); - // see if this value *might* have been an exact integer after adding 0.5 - // This tests the 1/2 through 1/16,777,216th places, but 0.5 needs to be tested out to - // the 1/140,737,488,355,328th place. + // See if this value *might* have been an exact integer after adding + // 0.5. This tests the 1/2 through 1/16,777,216th places, but 0.5 needs + // to be tested out to the 1/140,737,488,355,328th place. ma_tst(output, Imm32(0x00ffffff)); - // convert to a uint8 by shifting out all of the fraction bits + // Convert to a uint8 by shifting out all of the fraction bits. ma_lsr(Imm32(24), output, output); - // If any of the bottom 24 bits were non-zero, then we're good, since this number - // can't be exactly XX.0 + // If any of the bottom 24 bits were non-zero, then we're good, since + // this number can't be exactly XX.0. ma_b(&notSplit, NonZero); as_vxfer(ScratchRegister, InvalidReg, input, FloatToCore); ma_cmp(ScratchRegister, Imm32(0)); @@ -2489,11 +2520,11 @@ MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) } else { Label outOfRange; ma_vcmpz(input); - // do the add, in place so we can reference it later + // Do the add, in place so we can reference it later. ma_vadd(input, ScratchDoubleReg, input); - // do the conversion to an integer. + // Do the conversion to an integer. as_vcvt(VFPRegister(ScratchDoubleReg).uintOverlay(), VFPRegister(input)); - // copy the converted value out + // Copy the converted value out. as_vxfer(output, InvalidReg, ScratchDoubleReg, FloatToCore); as_vmrs(pc); ma_mov(Imm32(0), output, NoSetCond, Overflow); // NaN => 0 @@ -2501,9 +2532,9 @@ MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) ma_cmp(output, Imm32(0xff)); ma_mov(Imm32(0xff), output, NoSetCond, Above); ma_b(&outOfRange, Above); - // convert it back to see if we got the same value back + // Convert it back to see if we got the same value back. as_vcvt(ScratchDoubleReg, VFPRegister(ScratchDoubleReg).uintOverlay()); - // do the check + // Do the check. as_vcmp(ScratchDoubleReg, input); as_vmrs(pc); ma_bic(Imm32(1), output, NoSetCond, Zero); @@ -3062,13 +3093,13 @@ void MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label) { - // If cond == NotEqual, branch when a.payload != b.payload || a.tag != b.tag. - // If the payloads are equal, compare the tags. If the payloads are not equal, - // short circuit true (NotEqual). + // If cond == NotEqual, branch when a.payload != b.payload || a.tag != + // b.tag. If the payloads are equal, compare the tags. If the payloads are + // not equal, short circuit true (NotEqual). // // If cand == Equal, branch when a.payload == b.payload && a.tag == b.tag. - // If the payloads are equal, compare the tags. If the payloads are not equal, - // short circuit false (NotEqual). + // If the payloads are equal, compare the tags.
If the payloads are not + // equal, short circuit false (NotEqual). jsval_layout jv = JSVAL_TO_IMPL(v); if (v.isMarkable()) ma_cmp(value.payloadReg(), ImmGCPtr(reinterpret_cast(v.toGCThing()))); @@ -3105,7 +3136,7 @@ MacroAssemblerARMCompat::branchTestValue(Condition cond, const Address &valaddr, } } -// unboxing code +// Unboxing code. void MacroAssemblerARMCompat::unboxNonDouble(const ValueOperand &operand, Register dest) { @@ -3180,11 +3211,11 @@ MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand &operand, FloatReg void MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand &operand, FloatRegister dest) { - // transfer the integral value to a floating point register + // Transfer the integral value to a floating point register. VFPRegister vfpdest = VFPRegister(dest); as_vxfer(operand.payloadReg(), InvalidReg, ScratchFloat32Reg.sintOverlay(), CoreToFloat); - // convert the value to a double. + // Convert the value to a double. as_vcvt(vfpdest, ScratchFloat32Reg.sintOverlay()); } @@ -3201,11 +3232,11 @@ MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand &operand, FloatRe void MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand &operand, FloatRegister dest) { - // transfer the integral value to a floating point register + // Transfer the integral value to a floating point register. VFPRegister vfpdest = VFPRegister(dest).singleOverlay(); as_vxfer(operand.payloadReg(), InvalidReg, vfpdest.sintOverlay(), CoreToFloat); - // convert the value to a float. + // Convert the value to a float. as_vcvt(vfpdest, vfpdest.sintOverlay()); } @@ -3240,9 +3271,10 @@ MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, FloatR JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); // If it's an int, convert it to double. - ma_alu(base, lsl(index, shift), ScratchRegister, op_add); + ma_alu(base, lsl(index, shift), ScratchRegister, OpAdd); - // Since we only have one scratch register, we need to stomp over it with the tag + // Since we only have one scratch register, we need to stomp over it with + // the tag. ma_ldr(Address(ScratchRegister, NUNBOX32_TYPE_OFFSET), ScratchRegister); branchTestInt32(Assembler::NotEqual, ScratchRegister, ¬Int32); @@ -3255,7 +3287,7 @@ MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, FloatR bind(¬Int32); // First, recompute the offset that had been stored in the scratch register // since the scratch register was overwritten loading in the type. - ma_alu(base, lsl(index, shift), ScratchRegister, op_add); + ma_alu(base, lsl(index, shift), ScratchRegister, OpAdd); ma_vldr(Address(ScratchRegister, 0), dest); bind(&end); } @@ -3266,7 +3298,7 @@ MacroAssemblerARMCompat::loadConstantDouble(double dp, FloatRegister dest) as_FImm64Pool(dest, dp); } - // treat the value as a boolean, and set condition codes accordingly +// Treat the value as a boolean, and set condition codes accordingly. 
Assembler::Condition MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand &operand) @@ -3308,7 +3340,7 @@ MacroAssemblerARMCompat::extractTag(const Address &address, Register scratch) Register MacroAssemblerARMCompat::extractTag(const BaseIndex &address, Register scratch) { - ma_alu(address.base, lsl(address.index, address.scale), scratch, op_add, NoSetCond); + ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd, NoSetCond); return extractTag(Address(scratch, address.offset), scratch); } @@ -3381,12 +3413,12 @@ MacroAssemblerARMCompat::storeValue(ValueOperand val, const BaseIndex &dest) } ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(dest.base, EDtrOffReg(tmpIdx))); } else { - ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add); + ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd); ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(ScratchRegister, EDtrOffImm(dest.offset))); } } else { - ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add); + ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd); storeValue(val, Address(ScratchRegister, dest.offset)); } } @@ -3405,12 +3437,12 @@ MacroAssemblerARMCompat::loadValue(const BaseIndex &addr, ValueOperand val) } ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(), val.typeReg()); } else { - ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add); + ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, OpAdd); ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(addr.offset)), val.payloadReg(), val.typeReg()); } } else { - ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add); + ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, OpAdd); loadValue(Address(ScratchRegister, addr.offset), val); } } @@ -3421,22 +3453,24 @@ MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val) Operand srcOp = Operand(src); Operand payload = ToPayload(srcOp); Operand type = ToType(srcOp); - // TODO: copy this code into a generic function that acts on all sequences of memory accesses + // TODO: copy this code into a generic function that acts on all sequences + // of memory accesses. if (isValueDTRDCandidate(val)) { - // If the value we want is in two consecutive registers starting with an even register, - // they can be combined as a single ldrd. + // If the value we want is in two consecutive registers starting with an + // even register, they can be combined as a single ldrd. int offset = srcOp.disp(); if (offset < 256 && offset > -256) { ma_ldrd(EDtrAddr(Register::FromCode(srcOp.base()), EDtrOffImm(srcOp.disp())), val.payloadReg(), val.typeReg()); return; } } - // if the value is lower than the type, then we may be able to use an ldm instruction + // If the value is lower than the type, then we may be able to use an ldm + // instruction. if (val.payloadReg().code() < val.typeReg().code()) { if (srcOp.disp() <= 4 && srcOp.disp() >= -8 && (srcOp.disp() & 3) == 0) { - // turns out each of the 4 value -8, -4, 0, 4 corresponds exactly with one of - // LDM{DB, DA, IA, IB} + // Turns out each of the 4 values -8, -4, 0, 4 corresponds exactly + // with one of LDM{DB, DA, IA, IB}. DTMMode mode; switch(srcOp.disp()) { case -8: @@ -3461,8 +3495,8 @@ MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val) return; } } - // Ensure that loading the payload does not erase the pointer to the - // Value in memory.
+ // Ensure that loading the payload does not erase the pointer to the Value + in memory. if (Register::FromCode(type.base()) != val.payloadReg()) { ma_ldr(payload, val.payloadReg()); ma_ldr(type, val.typeReg()); @@ -3538,8 +3572,9 @@ MacroAssemblerARMCompat::storePayload(const Value &val, const BaseIndex &dest) else ma_mov(Imm32(jv.s.payload.i32), ScratchRegister); - // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm] - // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call. + // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index + // << shift + imm] cannot be encoded into a single instruction, and cannot + // be integrated into the as_dtr call. JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); as_dtr(IsStore, 32, Offset, ScratchRegister, @@ -3553,12 +3588,13 @@ MacroAssemblerARMCompat::storePayload(Register src, const BaseIndex &dest) MOZ_ASSERT(shift < 32); MOZ_ASSERT(dest.offset == 0); - // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm] - // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call. + // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index + // << shift + imm] cannot be encoded into a single instruction, and cannot + // be integrated into the as_dtr call. JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); - // Technically, shift > -32 can be handle by changing LSL to ASR, but should never come up, - // and this is one less code path to get wrong. + // Technically, shift > -32 can be handled by changing LSL to ASR, but should + // never come up, and this is one less code path to get wrong. as_dtr(IsStore, 32, Offset, src, DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift))); } @@ -3585,32 +3621,32 @@ MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const BaseIndex &dest) MOZ_ASSERT(index != ScratchRegister); // A value needs to be stored at base + index << shift + 4. - Arm cannot handle this in a single operand, so a temp register is required. - However, the scratch register is presently in use to hold the immediate that - is being stored into said memory location. Work around this by modifying - the base so the valid [base + index << shift] format can be used, then - restore it. + // ARM cannot handle this in a single operand, so a temp register is + // required. However, the scratch register is presently in use to hold the + // immediate that is being stored into said memory location. Work around + // this by modifying the base so the valid [base + index << shift] format + // can be used, then restore it. ma_add(base, Imm32(NUNBOX32_TYPE_OFFSET), base); ma_mov(tag, ScratchRegister); ma_str(ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift))); ma_sub(base, Imm32(NUNBOX32_TYPE_OFFSET), base); } -// ARM says that all reads of pc will return 8 higher than the -// address of the currently executing instruction. This means we are -// correctly storing the address of the instruction after the call -// in the register. -// Also ION is breaking the ARM EABI here (sort of). The ARM EABI -// says that a function call should move the pc into the link register, -// then branch to the function, and *sp is data that is owned by the caller, -// not the callee.
The ION ABI says *sp should be the address that -// we will return to when leaving this function +// ARM says that all reads of pc will return 8 higher than the address of the +// currently executing instruction. This means we are correctly storing the +// address of the instruction after the call in the register. +// +// Also ION is breaking the ARM EABI here (sort of). The ARM EABI says that a +// function call should move the pc into the link register, then branch to the +// function, and *sp is data that is owned by the caller, not the callee. The +// ION ABI says *sp should be the address that we will return to when leaving +// this function. void MacroAssemblerARM::ma_callIon(const Register r) { - // When the stack is 8 byte aligned, - // we want to decrement sp by 8, and write pc+8 into the new sp. - // when we return from this call, sp will be its present value minus 4. + // When the stack is 8 byte aligned, we want to decrement sp by 8, and write + // pc + 8 into the new sp. When we return from this call, sp will be its + // present value minus 4. AutoForbidPools afp(this); as_dtr(IsStore, 32, PreIndex, pc, DTRAddr(sp, DtrOffImm(-8))); as_blx(r); @@ -3618,8 +3654,8 @@ MacroAssemblerARM::ma_callIon(const Register r) void MacroAssemblerARM::ma_callIonNoPush(const Register r) { - // Since we just write the return address into the stack, which is - // popped on return, the net effect is removing 4 bytes from the stack + // Since we just write the return address into the stack, which is popped on + // return, the net effect is removing 4 bytes from the stack. AutoForbidPools afp(this); as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0))); as_blx(r); @@ -3628,9 +3664,9 @@ MacroAssemblerARM::ma_callIonNoPush(const Register r) void MacroAssemblerARM::ma_callIonHalfPush(const Register r) { - // The stack is unaligned by 4 bytes. - // We push the pc to the stack to align the stack before the call, when we - // return the pc is poped and the stack is restored to its unaligned state. + // The stack is unaligned by 4 bytes. We push the pc to the stack to align + // the stack before the call, when we return the pc is poped and the stack + // is restored to its unaligned state. AutoForbidPools afp(this); ma_push(pc); as_blx(r); @@ -3713,7 +3749,7 @@ MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch) ma_mov(sp, scratch); - // Force sp to be aligned + // Force sp to be aligned. ma_and(Imm32(~(StackAlignment - 1)), sp, sp); ma_push(scratch); } @@ -3729,17 +3765,18 @@ MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type switch (type) { case MoveOp::FLOAT32: case MoveOp::DOUBLE: { - // N.B. this isn't a limitation of the ABI, it is a limitation of the compiler right now. - // There isn't a good way to handle odd numbered single registers, so everything goes to hell - // when we try. Current fix is to never use more than one float in a function call. - // Fix coming along with complete float32 support in bug 957504. + // N.B. This isn't a limitation of the ABI, it is a limitation of the + // compiler right now. There isn't a good way to handle odd numbered + // single registers, so everything goes to hell when we try. Current fix + // is to never use more than one float in a function call. Fix coming + // along with complete float32 support in bug 957504. 
@@ -3729,17 +3765,18 @@ MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type
     switch (type) {
       case MoveOp::FLOAT32:
       case MoveOp::DOUBLE: {
-        // N.B. this isn't a limitation of the ABI, it is a limitation of the compiler right now.
-        // There isn't a good way to handle odd numbered single registers, so everything goes to hell
-        // when we try. Current fix is to never use more than one float in a function call.
-        // Fix coming along with complete float32 support in bug 957504.
+        // N.B. This isn't a limitation of the ABI; it is a limitation of the
+        // compiler right now. There isn't a good way to handle odd numbered
+        // single registers, so everything goes to hell when we try. Current fix
+        // is to never use more than one float in a function call. Fix coming
+        // along with complete float32 support in bug 957504.
        JS_ASSERT(!usedFloat32_);
        if (type == MoveOp::FLOAT32)
            usedFloat32_ = true;
        FloatRegister fr;
        if (GetFloatArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
            if (from.isFloatReg() && from.floatReg() == fr) {
-                // Nothing to do; the value is in the right register already
+                // Nothing to do; the value is in the right register already.
                usedFloatSlots_++;
                if (type == MoveOp::FLOAT32)
                    passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
@@ -3749,8 +3786,8 @@ MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type
            }
            to = MoveOperand(fr);
        } else {
-            // If (and only if) the integer registers have started spilling, do we
-            // need to take the register's alignment into account
+            // We need to take the register's alignment into account if (and
+            // only if) the integer registers have started spilling.
            uint32_t disp = INT_MAX;
            if (type == MoveOp::FLOAT32)
                disp = GetFloat32ArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
@@ -3769,7 +3806,7 @@ MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type
        Register r;
        if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) {
            if (from.isGeneralReg() && from.reg() == r) {
-                // Nothing to do; the value is in the right register already
+                // Nothing to do; the value is in the right register already.
                usedIntSlots_++;
                passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
                return;
@@ -3825,7 +3862,7 @@ MacroAssemblerARMCompat::passSoftFpABIArg(const MoveOperand &from, MoveOp::Type
            floatArgsInGPRValid[destReg.code() >> 1] = true;
            useResolver = false;
        } else if (from.isGeneralReg() && from.reg() == destReg) {
-            // No need to move anything
+            // No need to move anything.
            useResolver = false;
        } else {
            dest = MoveOperand(destReg);
@@ -3892,7 +3929,8 @@ MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJ
        *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue,
                                             StackAlignment);
    } else {
-        // sizeof(intptr_t) account for the saved stack pointer pushed by setupUnalignedABICall
+        // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+        // setupUnalignedABICall.
        *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
    }

@@ -3920,8 +3958,8 @@ MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJ
            // Note: We can safely use the MoveOperand's displacement here,
            // even if the base is SP: MoveEmitter::toOperand adjusts
            // SP-relative operands by the difference between the current
-            // stack usage and stackAdjust, which emitter.finish() resets
-            // to 0.
+            // stack usage and stackAdjust, which emitter.finish() resets to
+            // 0.
            //
            // Warning: if the offset isn't within [-255,+255] then this
            // will assert-fail (or, if non-debug, load the wrong words).
@@ -3966,8 +4004,8 @@ MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type resu
    freeStack(stackAdjust);

    if (dynamicAlignment_) {
-        // x86 supports pop esp. on arm, that isn't well defined, so just
-        // do it manually
+        // While x86 supports pop esp, on ARM that isn't well defined, so just
+        // do it manually.
        as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
    }

@@ -4042,9 +4080,9 @@ MacroAssemblerARMCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
 void
 MacroAssemblerARMCompat::callWithABI(const Address &fun, MoveOp::Type result)
 {
-    // Load the callee in r12, no instruction between the ldr and call
-    // should clobber it.  Note that we can't use fun.base because it may
-    // be one of the IntArg registers clobbered before the call.
+    // Load the callee in r12; no instruction between the ldr and call should
+    // clobber it. Note that we can't use fun.base because it may be one of the
+    // IntArg registers clobbered before the call.
     ma_ldr(fun, r12);
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
@@ -4105,9 +4143,8 @@ MacroAssemblerARMCompat::handleFailureWithHandlerTail()
     ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
     jump(r0);

-    // If we found a finally block, this must be a baseline frame. Push
-    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
-    // exception.
+    // If we found a finally block, this must be a baseline frame. Push two
+    // values expected by JSOP_RETSUB: BooleanValue(true) and the exception.
     bind(&finally);
     ValueOperand exception = ValueOperand(r1, r2);
     loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception);
@@ -4120,7 +4157,8 @@ MacroAssemblerARMCompat::handleFailureWithHandlerTail()
     pushValue(exception);
     jump(r0);

-    // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+    // Only used in debug mode. Return BaselineFrame->returnValue() to the
+    // caller.
     bind(&return_);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
@@ -4129,8 +4167,8 @@ MacroAssemblerARMCompat::handleFailureWithHandlerTail()
     pop(r11);
     ret();

-    // If we are bailing out to baseline to handle an exception, jump to
-    // the bailout tail stub.
+    // If we are bailing out to baseline to handle an exception, jump to the
+    // bailout tail stub.
     bind(&bailout);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, bailoutInfo)), r2);
     ma_mov(Imm32(BAILOUT_RETURN_OK), r0);
@@ -4159,10 +4197,10 @@ MacroAssemblerARMCompat::floor(FloatRegister input, Register output, Label *bail
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);

-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number; truncation is the path to glory. Since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F64_U32(input, ScratchDoubleReg.uintOverlay());
     ma_vxfer(ScratchDoubleReg.uintOverlay(), output);
     ma_mov(output, output, SetCond);
@@ -4171,29 +4209,28 @@ MacroAssemblerARMCompat::floor(FloatRegister input, Register output, Label *bail

     bind(&handleZero);
     // Move the top word of the double into the output reg, if it is non-zero,
-    // then the original value was -0.0
+    // then the original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);

     bind(&handleNeg);
-    // Negative case, negate, then start dancing
+    // Negative case: negate, then start dancing.
     ma_vneg(input, input);
     ma_vcvt_F64_U32(input, ScratchDoubleReg.uintOverlay());
     ma_vxfer(ScratchDoubleReg.uintOverlay(), output);
     ma_vcvt_U32_F64(ScratchDoubleReg.uintOverlay(), ScratchDoubleReg);
     compareDouble(ScratchDoubleReg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Negate the output.  Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
     // Flip the negated input back to its original value.
     ma_vneg(input, input);
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required.
-    // zero is also caught by this case, but floor of a negative number
-    // should never be zero.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required. Zero is also caught
+    // by this case, but floor of a negative number should never be zero.
     ma_b(bail, NotSigned);

     bind(&fin);
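
A C++ reference model of the floor()/floorf() fast paths above may help; this is a sketch under the same assumptions (FloorModel is an illustrative name, and the explicit range checks stand in for vcvt's saturating behavior), not the emitted code:

#include <cmath>
#include <cstdint>

// Returns false wherever the jit code would take the bail path.
static bool FloorModel(double input, int32_t *out)
{
    if (std::isnan(input))
        return false;                          // NaN always bails.
    if (input == 0)
        return std::signbit(input) ? false     // -0.0 bails (not an int32).
                                   : (*out = 0, true);
    if (input > 0) {
        if (input >= 4294967296.0)
            return false;                      // Conversion would saturate.
        uint32_t u = uint32_t(input);          // Truncation == floor for x > 0.
        if (u > 0x7fffffffu)
            return false;                      // "Looks negative" as int32: bail.
        *out = int32_t(u);
        return true;
    }
    double neg = -input;                       // Negative case: negate, truncate.
    if (neg >= 2147483649.0)
        return false;                          // Result would be below INT_MIN.
    uint32_t u = uint32_t(neg);
    if (double(u) != neg)
        u += 1;                                // Inexact: floor rounds away from 0.
    if (u > 0x80000000u)
        return false;
    *out = (u == 0x80000000u) ? INT32_MIN : -int32_t(u);
    return true;
}

For example, FloorModel(-2.5, &r) yields r == -3 via the truncate-then-adjust dance, while FloorModel(-0.0, &r) bails, matching the handleZero path.
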
@@ -4211,10 +4248,10 @@ MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label *bai
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);

-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number; truncation is the path to glory. Since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F32_U32(input, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
@@ -4223,29 +4260,28 @@ MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label *bai

     bind(&handleZero);
     // Move the top word of the double into the output reg, if it is non-zero,
-    // then the original value was -0.0
+    // then the original value was -0.0.
     as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);

     bind(&handleNeg);
-    // Negative case, negate, then start dancing
+    // Negative case: negate, then start dancing.
     ma_vneg_f32(input, input);
     ma_vcvt_F32_U32(input, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
     ma_vcvt_U32_F32(ScratchFloat32Reg.uintOverlay(), ScratchFloat32Reg);
     compareFloat(ScratchFloat32Reg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
     // Flip the negated input back to its original value.
     ma_vneg_f32(input, input);
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required.
-    // zero is also caught by this case, but floor of a negative number
-    // should never be zero.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required. Zero is also caught
+    // by this case, but floor of a negative number should never be zero.
     ma_b(bail, NotSigned);

     bind(&fin);
@@ -4270,8 +4306,8 @@ MacroAssemblerARMCompat::ceil(FloatRegister input, Register output, Label *bail)
     compareDouble(input, ScratchDoubleReg);
     ma_b(bail, Assembler::GreaterThan);

-    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can
-    // be computed with direct truncation here (x > 0).
+    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+    // computed with direct truncation here (x > 0).
     ma_vneg(input, ScratchDoubleReg);
     FloatRegister ScratchUIntReg = ScratchDoubleReg.uintOverlay();
     ma_vcvt_F64_U32(ScratchDoubleReg, ScratchUIntReg);
@@ -4296,7 +4332,7 @@ MacroAssemblerARMCompat::ceil(FloatRegister input, Register output, Label *bail)
     ma_vcvt_U32_F64(ScratchUIntReg, ScratchDoubleReg);
     compareDouble(ScratchDoubleReg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Bail out if the add overflowed or the result is non positive
+    // Bail out if the add overflowed or the result is non-positive.
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(bail, Zero);
@@ -4323,8 +4359,8 @@ MacroAssemblerARMCompat::ceilf(FloatRegister input, Register output, Label *bail
     compareFloat(input, ScratchFloat32Reg);
     ma_b(bail, Assembler::GreaterThan);

-    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can
-    // be computed with direct truncation here (x > 0).
+    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+    // computed with direct truncation here (x > 0).
     ma_vneg_f32(input, ScratchFloat32Reg);
     FloatRegister ScratchUIntReg = ScratchDoubleReg.uintOverlay();
     ma_vcvt_F32_U32(ScratchFloat32Reg, ScratchUIntReg);
@@ -4349,7 +4385,7 @@ MacroAssemblerARMCompat::ceilf(FloatRegister input, Register output, Label *bail
     ma_vcvt_U32_F32(ScratchUIntReg, ScratchFloat32Reg);
     compareFloat(ScratchFloat32Reg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Bail out if the add overflowed or the result is non positive
+    // Bail out if the add overflowed or the result is non-positive.
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(bail, Zero);
@@ -4361,7 +4397,6 @@ CodeOffsetLabel
 MacroAssemblerARMCompat::toggledJump(Label *label)
 {
     // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
-
     BufferOffset b = ma_b(label, Always, true);
     CodeOffsetLabel ret(b.getOffset());
     return ret;
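
The identity that ceil()/ceilf() lean on in the ]-Inf; -1] range is compact enough to state as code (a sketch, not the emitted sequence):

#include <cmath>

// Negating flips ceil into a floor, and for the negated (positive) value
// plain truncation computes that floor.
static double CeilViaNegatedTruncation(double x)
{
    // Assumes x <= -1, so -x >= 1 and truncation == floor.
    return -std::trunc(-x);
}
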
@@ -4386,13 +4421,15 @@ MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail
     Label handleZero;
     Label handleNeg;
     Label fin;
-    // Do a compare based on the original value, then do most other things based on the
-    // shifted value.
+    // Do a compare based on the original value, then do most other things based
+    // on the shifted value.
     ma_vcmpz(input);
     // Adding 0.5 is technically incorrect!
-    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers.
+    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to
+    // positive numbers.
     ma_vimm(0.5, ScratchDoubleReg);
-    // Since we already know the sign bit, flip all numbers to be positive, stored in tmp.
+    // Since we already know the sign bit, flip all numbers to be positive,
+    // stored in tmp.
     ma_vabs(input, tmp);
     // Add 0.5, storing the result into tmp.
     ma_vadd(ScratchDoubleReg, tmp, tmp);
@@ -4402,10 +4439,10 @@ MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);

-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number; truncation is the path to glory. Since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F64_U32(tmp, ScratchDoubleReg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchDoubleReg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
@@ -4421,23 +4458,25 @@ MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail
     ma_b(&fin);

     bind(&handleNeg);
-    // Negative case, negate, then start dancing. This number may be positive, since we added 0.5
+    // Negative case: negate, then start dancing. This number may be positive,
+    // since we added 0.5.
     ma_vcvt_F64_U32(tmp, ScratchDoubleReg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchDoubleReg).uintOverlay(), output);
-    // -output is now a correctly rounded value, unless the original value was exactly
-    // halfway between two integers, at which point, it has been rounded away from zero, when
-    // it should be rounded towards \infty.
+    // -output is now a correctly rounded value, unless the original value was
+    // exactly halfway between two integers, at which point it has been rounded
+    // away from zero, when it should be rounded towards \infty.
     ma_vcvt_U32_F64(ScratchDoubleReg.uintOverlay(), ScratchDoubleReg);
     compareDouble(ScratchDoubleReg, tmp);
     ma_sub(output, Imm32(1), output, NoSetCond, Equal);
-    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required, or it was zero, which means
-    // the result is actually -0.0 which also requires special handling.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required, or it was zero,
+    // which means the result is actually -0.0, which also requires special
+    // handling.
     ma_b(bail, NotSigned);

     bind(&fin);
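
The halfway-tie handling in round()'s negative path is the subtle part. A reference model (a sketch, assuming the magnitude fits a uint32; RoundNegativeModel is an illustrative name):

#include <cstdint>

// tmp = |input| + 0.5 is truncated; an exact conversion means the input was
// exactly halfway between two integers, which must round toward +Inf, hence
// the subtract before negating.
static int32_t RoundNegativeModel(double input /* < 0 */)
{
    double tmp = -input + 0.5;
    uint32_t u = uint32_t(tmp);     // Truncate.
    if (double(u) == tmp)
        u -= 1;                     // Halfway case: round toward +Inf.
    return -int32_t(u);             // Caller bails if this is not negative.
}

RoundNegativeModel(-2.5) gives -2 and RoundNegativeModel(-2.6) gives -3, while RoundNegativeModel(-0.4) gives 0, which the NotSigned bail above turns into the -0.0 special case.
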
@@ -4449,13 +4488,15 @@ MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bai
     Label handleZero;
     Label handleNeg;
     Label fin;
-    // Do a compare based on the original value, then do most other things based on the
-    // shifted value.
+    // Do a compare based on the original value, then do most other things based
+    // on the shifted value.
     ma_vcmpz_f32(input);
     // Adding 0.5 is technically incorrect!
-    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers.
+    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to
+    // positive numbers.
     ma_vimm_f32(0.5f, ScratchFloat32Reg);
-    // Since we already know the sign bit, flip all numbers to be positive, stored in tmp.
+    // Since we already know the sign bit, flip all numbers to be positive,
+    // stored in tmp.
     ma_vabs_f32(input, tmp);
     // Add 0.5, storing the result into tmp.
     ma_vadd_f32(ScratchFloat32Reg, tmp, tmp);
@@ -4465,10 +4506,10 @@ MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bai
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);

-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number; truncation is the path to glory. Since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F32_U32(tmp, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
@@ -4476,31 +4517,33 @@ MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bai
     ma_b(&fin);

     bind(&handleZero);
-    // Move the whole float32 into the output reg, if it is non-zero,
-    // then the original value was -0.0
+    // Move the whole float32 into the output reg, if it is non-zero, then the
+    // original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 0);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);

     bind(&handleNeg);
-    // Negative case, negate, then start dancing. This number may be positive, since we added 0.5
+    // Negative case: negate, then start dancing. This number may be positive,
+    // since we added 0.5.
     ma_vcvt_F32_U32(tmp, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
-    // -output is now a correctly rounded value, unless the original value was exactly
-    // halfway between two integers, at which point, it has been rounded away from zero, when
-    // it should be rounded towards \infty.
+    // -output is now a correctly rounded value, unless the original value was
+    // exactly halfway between two integers, at which point it has been rounded
+    // away from zero, when it should be rounded towards \infty.
     ma_vcvt_U32_F32(ScratchFloat32Reg.uintOverlay(), ScratchFloat32Reg);
     compareFloat(ScratchFloat32Reg, tmp);
     ma_sub(output, Imm32(1), output, NoSetCond, Equal);
-    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required, or it was zero, which means
-    // the result is actually -0.0 which also requires special handling.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required, or it was zero,
+    // which means the result is actually -0.0, which also requires special
+    // handling.
     ma_b(bail, NotSigned);

     bind(&fin);
@@ -4511,8 +4554,8 @@ MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel *label, Condition cond)
 {
     ARMBuffer::PoolEntry pe;
     BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond);
-    // Fill in a new CodeOffset with both the load and the
-    // pool entry that the instruction loads from.
+    // Fill in a new CodeOffset with both the load and the pool entry that the
+    // instruction loads from.
     CodeOffsetJump ret(bo.getOffset(), pe.encode());
     return ret;
 }
diff --git a/js/src/jit/arm/MacroAssembler-arm.h b/js/src/jit/arm/MacroAssembler-arm.h
index 547df442b484..1e9ab8a425b1 100644
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -25,17 +25,19 @@ static Register CallReg = ip;
 static const int defaultShift = 3;
 JS_STATIC_ASSERT(1 << defaultShift == sizeof(jsval));

-// MacroAssemblerARM is inheriting form Assembler defined in Assembler-arm.{h,cpp}
+// MacroAssemblerARM inherits from Assembler, defined in
+// Assembler-arm.{h,cpp}.
 class MacroAssemblerARM : public Assembler
 {
   protected:
-    // On ARM, some instructions require a second scratch register. This register
-    // defaults to lr, since it's non-allocatable (as it can be clobbered by some
-    // instructions). Allow the baseline compiler to override this though, since
-    // baseline IC stubs rely on lr holding the return address.
+    // On ARM, some instructions require a second scratch register. This
+    // register defaults to lr, since it's non-allocatable (as it can be
+    // clobbered by some instructions). Allow the baseline compiler to override
+    // this though, since baseline IC stubs rely on lr holding the return
+    // address.
     Register secondScratchReg_;

-    // higher level tag testing code
+    // Higher level tag testing code.
     Operand ToPayload(Operand base) {
         return Operand(Register::FromCode(base.base()), base.disp());
     }
@@ -85,10 +87,9 @@ class MacroAssemblerARM : public Assembler
     void negateDouble(FloatRegister reg);
     void inc64(AbsoluteAddress dest);

-    // somewhat direct wrappers for the low-level assembler funcitons
-    // bitops
-    // attempt to encode a virtual alu instruction using
-    // two real instructions.
+    // Somewhat direct wrappers for the low-level assembler functions (bitops).
+    // Attempt to encode a virtual alu instruction using two real
+    // instructions.
  private:
    bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                 SetCond_ sc, Condition c);
@@ -107,9 +108,11 @@ class MacroAssemblerARM : public Assembler
                         RelocStyle rs, Instruction *i = nullptr);
    void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
                         RelocStyle rs, Instruction *i = nullptr);
-    // These should likely be wrapped up as a set of macros
-    // or something like that. I cannot think of a good reason
-    // to explicitly have all of this code.
+
+    // These should likely be wrapped up as a set of macros or something like
+    // that. I cannot think of a good reason to explicitly have all of this
+    // code.
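
The reason alu_dbl may need two real instructions is ARM's immediate format. A sketch of the encodability test (IsArmImm8Rot is an illustrative helper, not the assembler's API):

#include <cstdint>

// An ARM data-processing immediate is an 8-bit value rotated right by an even
// amount; anything failing this test needs a second instruction or a literal
// load.
static bool IsArmImm8Rot(uint32_t v)
{
    for (int rot = 0; rot < 32; rot += 2) {
        // Rotate left by rot to undo a right-rotation of the same amount.
        uint32_t undone = rot ? (v << rot) | (v >> (32 - rot)) : v;
        if (undone <= 0xff)
            return true;
    }
    return false;
}
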
+ // ALU based ops // mov void ma_mov(Register src, Register dest, @@ -147,7 +150,7 @@ class MacroAssemblerARM : public Assembler void ma_neg(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // and + // And void ma_and(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); @@ -162,11 +165,11 @@ class MacroAssemblerARM : public Assembler - // bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2) + // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2) void ma_bic(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // exclusive or + // Exclusive or void ma_eor(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); @@ -180,7 +183,7 @@ class MacroAssemblerARM : public Assembler SetCond_ sc = NoSetCond, Condition c = Always); - // or + // Or void ma_orr(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); @@ -194,49 +197,49 @@ class MacroAssemblerARM : public Assembler SetCond_ sc = NoSetCond, Condition c = Always); - // arithmetic based ops - // add with carry + // Arithmetic based ops. + // Add with carry: void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // add + // Add: void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // subtract with carry + // Subtract with carry: void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // subtract + // Subtract: void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // reverse subtract + // Reverse subtract: void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - // reverse subtract with carry + // Reverse subtract with carry: void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always); - 
// compares/tests
-    // compare negative (sets condition codes as src1 + src2 would)
+    // Compares/tests.
+    // Compare negative (sets condition codes as src1 + src2 would):
     void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
     void ma_cmn(Register src1, Register src2, Condition c = Always);
     void ma_cmn(Register src1, Operand op, Condition c = Always);

-    // compare (src - src2)
+    // Compare (src1 - src2):
     void ma_cmp(Register src1, Imm32 imm, Condition c = Always);
     void ma_cmp(Register src1, ImmWord ptr, Condition c = Always);
     void ma_cmp(Register src1, ImmGCPtr ptr, Condition c = Always);
@@ -244,38 +247,39 @@ class MacroAssemblerARM : public Assembler
     void ma_cmp(Register src1, Register src2, Condition c = Always);

-    // test for equality, (src1^src2)
+    // Test for equality (src1 ^ src2):
     void ma_teq(Register src1, Imm32 imm, Condition c = Always);
     void ma_teq(Register src1, Register src2, Condition c = Always);
     void ma_teq(Register src1, Operand op, Condition c = Always);

-    // test (src1 & src2)
+    // Test (src1 & src2):
     void ma_tst(Register src1, Imm32 imm, Condition c = Always);
     void ma_tst(Register src1, Register src2, Condition c = Always);
     void ma_tst(Register src1, Operand op, Condition c = Always);

-    // multiplies. For now, there are only two that we care about.
+    // Multiplies. For now, there are only two that we care about.
     void ma_mul(Register src1, Register src2, Register dest);
     void ma_mul(Register src1, Imm32 imm, Register dest);
     Condition ma_check_mul(Register src1, Register src2, Register dest, Condition cond);
     Condition ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond);

-    // fast mod, uses scratch registers, and thus needs to be in the assembler
-    // implicitly assumes that we can overwrite dest at the beginning of the sequence
+    // Fast mod: uses scratch registers, and thus needs to be in the assembler;
+    // it implicitly assumes that we can overwrite dest at the beginning of the
+    // sequence.
     void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                      int32_t shift);

-    // mod, depends on integer divide instructions being supported
+    // Mod - depends on integer divide instructions being supported.
     void ma_smod(Register num, Register div, Register dest);
     void ma_umod(Register num, Register div, Register dest);

-    // division, depends on integer divide instructions being supported
+    // Division - depends on integer divide instructions being supported.
     void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
     void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);

-    // memory
-    // shortcut for when we know we're transferring 32 bits of data
+    // Memory:
+    // Shortcut for when we know we're transferring 32 bits of data.
     void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
                 Index mode = Offset, Condition cc = Always);
@@ -298,7 +302,7 @@ class MacroAssemblerARM : public Assembler
     void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
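
The cmn wording above ("sets condition codes as src1 + src2 would") is worth pinning down; a flag-level model (a sketch, with illustrative names, not the simulator's code):

#include <cstdint>

// cmp sets flags from src1 - src2, cmn from src1 + src2, so "cmn r, #imm"
// behaves like "cmp r, #-imm" even when -imm is not encodable as an ARM
// immediate.
struct NZCV { bool n, z, c, v; };

static NZCV CmnModel(int32_t src1, int32_t src2)
{
    uint32_t sum = uint32_t(src1) + uint32_t(src2);
    NZCV f;
    f.n = int32_t(sum) < 0;
    f.z = sum == 0;
    f.c = sum < uint32_t(src1);                                // Unsigned carry-out.
    f.v = ((src1 ^ src2) >= 0) && ((src1 ^ int32_t(sum)) < 0); // Signed overflow.
    return f;
}
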
-    // specialty for moving N bits of data, where n == 8,16,32,64
+    // Specialty for moving N bits of data, where n == 8,16,32,64.
     BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                   Register rn, Register rm, Register rt,
                                   Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
@@ -312,19 +316,19 @@ class MacroAssemblerARM : public Assembler
     void ma_vpop(VFPRegister r);
     void ma_vpush(VFPRegister r);

-    // branches when done from within arm-specific code
+    // Branches when done from within arm-specific code.
     BufferOffset ma_b(Label *dest, Condition c = Always, bool isPatchable = false);
     void ma_bx(Register dest, Condition c = Always);

     void ma_b(void *target, Relocation::Kind reloc, Condition c = Always);

-    // this is almost NEVER necessary, we'll basically never be calling a label
+    // This is almost NEVER necessary; we'll basically never be calling a label
     // except, possibly in the crazy bailout-table case.
     void ma_bl(Label *dest, Condition c = Always);

     void ma_blx(Register dest, Condition c = Always);

-    //VFP/ALU
+    // VFP/ALU:
     void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
     void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
@@ -356,19 +360,19 @@ class MacroAssemblerARM : public Assembler

     void ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);

-    // source is F64, dest is I32
+    // Source is F64, dest is I32:
     void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);

-    // source is I32, dest is F64
+    // Source is I32, dest is F64:
     void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);

-    // source is F32, dest is I32
+    // Source is F32, dest is I32:
     void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);

-    // source is I32, dest is F32
+    // Source is I32, dest is F32:
     void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
@@ -389,21 +393,22 @@ class MacroAssemblerARM : public Assembler
     BufferOffset ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);

     BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
-    // calls an Ion function, assumes that the stack is untouched (8 byte alinged)
+    // Calls an Ion function, assuming that the stack is untouched (8 byte
+    // aligned).
     void ma_callIon(const Register reg);
-    // callso an Ion function, assuming that sp has already been decremented
+    // Calls an Ion function, assuming that sp has already been decremented.
     void ma_callIonNoPush(const Register reg);
-    // calls an ion function, assuming that the stack is currently not 8 byte aligned
+    // Calls an Ion function, assuming that the stack is currently not 8 byte
+    // aligned.
     void ma_callIonHalfPush(const Register reg);

     void ma_call(ImmPtr dest);
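
The run-based transfer helpers declared just below exist because vldm/vstm only move consecutive registers. A sketch of the splitting they perform (ForEachRun is a hypothetical bitmask helper, not the real FloatRegisterSet API):

#include <cstdint>

// Walk a register bitmask as maximal consecutive runs, one transfer each.
template <typename EmitFn>  // void(int firstReg, int count)
static void ForEachRun(uint32_t mask, EmitFn emit)
{
    int i = 0;
    while (mask != 0) {
        while ((mask & (1u << i)) == 0)
            i++;                       // Skip to the start of the next run.
        int start = i;
        while (i < 32 && (mask & (1u << i)) != 0) {
            mask &= ~(1u << i);        // Consume the run.
            i++;
        }
        emit(start, i - start);
    }
}
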
-    // Float registers can only be loaded/stored in continuous runs
-    // when using vstm/vldm.
-    // This function breaks set into continuous runs and loads/stores
-    // them at [rm]. rm will be modified and left in a state logically
-    // suitable for the next load/store.
-    // Returns the offset from [dm] for the logical next load/store.
+    // Float registers can only be loaded/stored in continuous runs when using
+    // vstm/vldm. This function breaks the set into continuous runs and
+    // loads/stores them at [rm]. rm will be modified and left in a state
+    // logically suitable for the next load/store. Returns the offset from [dm]
+    // for the logical next load/store.
     int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
                                    Register rm, DTMMode mode)
     {
@@ -420,9 +425,8 @@ class MacroAssemblerARM : public Assembler

   private:
     // Implementation for transferMultipleByRuns so we can use different
-    // iterators for forward/backward traversals.
-    // The sign argument should be 1 if we traverse forwards, -1 if we
-    // traverse backwards.
+    // iterators for forward/backward traversals. The sign argument should be 1
+    // if we traverse forwards, -1 if we traverse backwards.
    template <typename RunIteratorType>
    int32_t transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
                                       Register rm, DTMMode mode, int32_t sign)
@@ -453,17 +457,17 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     // Number of bytes the stack is adjusted inside a call to C. Calls to C may
     // not be nested.
     uint32_t args_;
-    // The actual number of arguments that were passed, used to assert that
-    // the initial number of arguments declared was correct.
+    // The actual number of arguments that were passed, used to assert that the
+    // initial number of arguments declared was correct.
     uint32_t passedArgs_;
     uint32_t passedArgTypes_;

     // ARM treats arguments as a vector in registers/memory, that looks like:
     // { r0, r1, r2, r3, [sp], [sp,+4], [sp,+8] ... }
-    // usedIntSlots_ keeps track of how many of these have been used.
-    // It bears a passing resemblance to passedArgs_, but a single argument
-    // can effectively use between one and three slots depending on its size and
-    // alignment requirements
+    // usedIntSlots_ keeps track of how many of these have been used. It bears a
+    // passing resemblance to passedArgs_, but a single argument can effectively
+    // use between one and three slots depending on its size and alignment
+    // requirements.
     uint32_t usedIntSlots_;
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
     uint32_t usedFloatSlots_;
@@ -472,13 +476,13 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
#endif
     bool dynamicAlignment_;

-    // Used to work around the move resolver's lack of support for
-    // moving into register pairs, which the softfp ABI needs.
+    // Used to work around the move resolver's lack of support for moving into
+    // register pairs, which the softfp ABI needs.
     mozilla::Array<MoveOperand, 2> floatArgsInGPR;
     mozilla::Array<bool, 2> floatArgsInGPRValid;

     // Compute space needed for the function call and set the properties of the
-    // callee.  It returns the space which has to be allocated for calling the
+    // callee. It returns the space which has to be allocated for calling the
     // function.
     //
     // arg            Number of arguments of the function.
@@ -489,9 +493,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM

     // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
     // needed to compute offsets to stack slots while temporary space has been
-    // reserved for unexpected spills or C++ function calls. It is maintained
-    // by functions which track stack alignment, which for clear distinction
-    // use StudlyCaps (for example, Push, Pop).
+    // reserved for unexpected spills or C++ function calls. It is maintained by
+    // functions which track stack alignment, which for clear distinction use
+    // StudlyCaps (for example, Push, Pop).
uint32_t framePushed_; void adjustFrame(int value) { setFramePushed(framePushed_ + value); @@ -505,9 +509,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM public: using MacroAssemblerARM::call; - // jumps + other functions that should be called from - // non-arm specific code... - // basically, an x86 front end on top of the ARM code. + // Jumps + other functions that should be called from non-arm specific + // code. Basically, an x86 front end on top of the ARM code. void j(Condition code , Label *dest) { as_b(dest, code); @@ -537,7 +540,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM as_blx(reg); } void call(Label *label) { - // for now, assume that it'll be nearby? + // For now, assume that it'll be nearby? as_bl(label, Always); } void call(ImmWord imm) { @@ -647,8 +650,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM CodeOffsetLabel toggledJump(Label *label); - // Emit a BLX or NOP instruction. ToggleCall can be used to patch - // this instruction. + // Emit a BLX or NOP instruction. ToggleCall can be used to patch this + // instruction. CodeOffsetLabel toggledCall(JitCode *target, bool enabled); CodeOffsetLabel pushWithPatch(ImmWord imm) { @@ -702,7 +705,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM return value.typeReg(); } - // higher level tag testing code + // Higher level tag testing code. Condition testInt32(Condition cond, const ValueOperand &value); Condition testBoolean(Condition cond, const ValueOperand &value); Condition testDouble(Condition cond, const ValueOperand &value); @@ -716,7 +719,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM Condition testPrimitive(Condition cond, const ValueOperand &value); - // register-based tests + // Register-based tests. Condition testInt32(Condition cond, Register tag); Condition testBoolean(Condition cond, Register tag); Condition testNull(Condition cond, Register tag); @@ -767,7 +770,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value, Label *label); - // unboxing code + // Unboxing code. void unboxNonDouble(const ValueOperand &operand, Register dest); void unboxNonDouble(const Address &src, Register dest); void unboxInt32(const ValueOperand &src, Register dest) { unboxNonDouble(src, dest); } @@ -789,7 +792,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM ma_eor(Imm32(1), val.payloadReg()); } - // boxing code + // Boxing code. void boxDouble(FloatRegister src, const ValueOperand &dest); void boxNonDouble(JSValueType type, Register src, const ValueOperand &dest); @@ -818,7 +821,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM void loadInt32OrDouble(Register base, Register index, FloatRegister dest, int32_t shift = defaultShift); void loadConstantDouble(double dp, FloatRegister dest); - // treat the value as a boolean, and set condition codes accordingly + // Treat the value as a boolean, and set condition codes accordingly. Condition testInt32Truthy(bool truthy, const ValueOperand &operand); Condition testBooleanTruthy(bool truthy, const ValueOperand &operand); Condition testDoubleTruthy(bool truthy, FloatRegister reg); @@ -1107,7 +1110,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM void storeValue(JSValueType type, Register reg, BaseIndex dest) { // Harder cases not handled yet. 
         JS_ASSERT(dest.offset == 0);
-        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
+        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(type, reg, Address(ScratchRegister, 0));
     }
     void storeValue(ValueOperand val, const Address &dest) {
@@ -1131,7 +1134,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     void storeValue(const Value &val, BaseIndex dest) {
         // Harder cases not handled yet.
         JS_ASSERT(dest.offset == 0);
-        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
+        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(val, Address(ScratchRegister, 0));
     }

@@ -1250,7 +1253,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     void callWithExitFrame(JitCode *target, Register dynStack);

     // Makes an Ion call using the only two methods that it is sane for
-    // indep code to make a call
+    // independent code to make a call.
     void callIon(Register callee);
     void callIonFromAsmJS(Register callee);

@@ -1379,8 +1382,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     }

     void clampIntToUint8(Register reg) {
-        // look at (reg >> 8) if it is 0, then reg shouldn't be clamped
-        // if it is <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
+        // Look at (reg >> 8): if it is 0, then reg shouldn't be clamped; if it
+        // is < 0, clamp to 0; otherwise, clamp to 255.
         as_mov(ScratchRegister, asr(reg, 8), SetCond);
         ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
         ma_mov(Imm32(0), reg, NoSetCond, Signed);
@@ -1423,7 +1426,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     void setStackArg(Register reg, uint32_t arg);

     void breakpoint();
-    // conditional breakpoint
+    // Conditional breakpoint.
     void breakpoint(Condition cc);

     void compareDouble(FloatRegister lhs, FloatRegister rhs);
@@ -1446,8 +1449,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
         ma_lsl(imm, dest, dest);
     }

-    // If source is a double, load it into dest. If source is int32,
-    // convert it to double. Else, branch to failure.
+    // If source is a double, load it into dest. If source is int32, convert it
+    // to double. Else, branch to failure.
     void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);
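
The clampIntToUint8 trick above compresses three cases into one shift. A reference model (a sketch; ClampIntToUint8Model is an illustrative name):

#include <cstdint>

// A single arithmetic shift by 8 classifies all three cases, which is what
// the SetCond mov plus the two predicated moves exploit.
static int32_t ClampIntToUint8Model(int32_t reg)
{
    int32_t top = reg >> 8;     // Arithmetic shift, mirrors asr(reg, 8).
    if (top == 0)
        return reg;             // Already in [0, 255].
    return top < 0 ? 0 : 255;   // Signed => clamp to 0; NotEqual => 255.
}
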

     void
@@ -1501,8 +1504,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     // Arguments must be assigned in a left-to-right order. This process may
     // temporarily use more stack, in which case esp-relative addresses will be
     // automatically adjusted. It is extremely important that esp-relative
-    // addresses are computed *after* setupABICall(). Furthermore, no
-    // operations should be emitted while setting arguments.
+    // addresses are computed *after* setupABICall(). Furthermore, no operations
+    // should be emitted while setting arguments.
     void passABIArg(const MoveOperand &from, MoveOp::Type type);
     void passABIArg(Register reg);
     void passABIArg(FloatRegister reg, MoveOp::Type type);
@@ -1533,7 +1536,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
         ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
     }
     void computeEffectiveAddress(const BaseIndex &address, Register dest) {
-        ma_alu(address.base, lsl(address.index, address.scale), dest, op_add, NoSetCond);
+        ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, NoSetCond);
         if (address.offset)
             ma_add(dest, Imm32(address.offset), dest, NoSetCond);
     }
@@ -1545,8 +1548,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
     void roundf(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);

     void clampCheck(Register r, Label *handleNotAnInt) {
-        // check explicitly for r == INT_MIN || r == INT_MAX
-        // this is the instruction sequence that gcc generated for this
+        // Check explicitly for r == INT_MIN || r == INT_MAX.
+        // This is the instruction sequence that gcc generated for this
         // operation.
         ma_sub(r, Imm32(0x80000001), ScratchRegister);
         ma_cmn(ScratchRegister, Imm32(3));
diff --git a/js/src/jit/arm/MoveEmitter-arm.cpp b/js/src/jit/arm/MoveEmitter-arm.cpp
index 6bd53a3eda61..91a427180e00 100644
--- a/js/src/jit/arm/MoveEmitter-arm.cpp
+++ b/js/src/jit/arm/MoveEmitter-arm.cpp
@@ -46,7 +46,7 @@ MoveEmitterARM::cycleSlot() const
     return Operand(StackPointer, offset);
 }

-// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks
+// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks.
 Operand
 MoveEmitterARM::spillSlot() const
 {
@@ -83,12 +83,12 @@ MoveEmitterARM::tempReg()
     if (spilledReg_ != InvalidReg)
         return spilledReg_;

-    // For now, just pick r12/ip as the eviction point. This is totally
-    // random, and if it ends up being bad, we can use actual heuristics later.
-    // r12 is actually a bad choice. it is the scratch register, which is frequently
-    // used for address computations, such as those found when we attempt to access
-    // values more than 4096 off of the stack pointer.
-    // instead, use lr, the LinkRegister.
+    // For now, just pick r12/ip as the eviction point. This is totally random,
+    // and if it ends up being bad, we can use actual heuristics later. r12 is
+    // actually a bad choice. It is the scratch register, which is frequently
+    // used for address computations, such as those found when we attempt to
+    // access values more than 4096 off of the stack pointer. Instead, use lr,
+    // the LinkRegister.
     spilledReg_ = r14;
     if (pushedAtSpill_ == -1) {
         masm.Push(spilledReg_);
diff --git a/js/src/jit/arm/Simulator-arm.cpp b/js/src/jit/arm/Simulator-arm.cpp
index f09b06cc7f12..635fa0990fbc 100644
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -191,54 +191,54 @@ class SimInstruction {
     inline int VFPMRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 0, 5); }
     inline int VFPDRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 12, 22); }

-    // Fields used in Data processing instructions
+    // Fields used in Data processing instructions.
     inline int opcodeValue() const { return static_cast<ALUOp>(bits(24, 21)); }
     inline ALUOp opcodeField() const { return static_cast<ALUOp>(bitField(24, 21)); }
     inline int sValue() const { return bit(20); }

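A sketch of the field-extraction helpers these accessors rely on (assumption: this mirrors the contract of SimInstruction::bits/bit; Bits and Bit are illustrative names):

#include <cstdint>

// Select bits hi..lo, inclusive, of the raw instruction word (hi - lo < 31).
static int Bits(uint32_t instructionBits, int hi, int lo)
{
    return int((instructionBits >> lo) & ((1u << (hi - lo + 1)) - 1));
}

static int Bit(uint32_t instructionBits, int nr)
{
    return int((instructionBits >> nr) & 1u);
}
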
-    // with register
+    // With register.
     inline int rmValue() const { return bits(3, 0); }
     inline ShiftType shifttypeValue() const { return static_cast<ShiftType>(bits(6, 5)); }
     inline int rsValue() const { return bits(11, 8); }
     inline int shiftAmountValue() const { return bits(11, 7); }

-    // with immediate
+    // With immediate.
     inline int rotateValue() const { return bits(11, 8); }
     inline int immed8Value() const { return bits(7, 0); }
     inline int immed4Value() const { return bits(19, 16); }
     inline int immedMovwMovtValue() const { return immed4Value() << 12 | offset12Value(); }

-    // Fields used in Load/Store instructions
+    // Fields used in Load/Store instructions.
     inline int PUValue() const { return bits(24, 23); }
     inline int PUField() const { return bitField(24, 23); }
     inline int bValue() const { return bit(22); }
     inline int wValue() const { return bit(21); }
     inline int lValue() const { return bit(20); }

-    // with register uses same fields as Data processing instructions above
-    // with immediate
+    // With register: uses the same fields as Data processing instructions
+    // above. With immediate:
     inline int offset12Value() const { return bits(11, 0); }

-    // multiple
+    // Multiple.
     inline int rlistValue() const { return bits(15, 0); }

-    // extra loads and stores
+    // Extra loads and stores.
     inline int signValue() const { return bit(6); }
     inline int hValue() const { return bit(5); }
     inline int immedHValue() const { return bits(11, 8); }
     inline int immedLValue() const { return bits(3, 0); }

-    // Fields used in Branch instructions
+    // Fields used in Branch instructions.
     inline int linkValue() const { return bit(24); }
     inline int sImmed24Value() const { return ((instructionBits() << 8) >> 8); }

-    // Fields used in Software interrupt instructions
+    // Fields used in Software interrupt instructions.
     inline SoftwareInterruptCodes svcValue() const { return static_cast<SoftwareInterruptCodes>(bits(23, 0)); }

-    // Test for special encodings of type 0 instructions (extra loads and stores,
-    // as well as multiplications).
+    // Test for special encodings of type 0 instructions (extra loads and
+    // stores, as well as multiplications).
     inline bool isSpecialType0() const { return (bit(7) == 1) && (bit(4) == 1); }

     // Test for miscellaneous instructions encodings of type 0 instructions.
@@ -631,12 +631,13 @@ ReadLine(const char *prompt)
         }
         int len = strlen(line_buf);
         if (len > 0 && line_buf[len - 1] == '\n') {
-            // Since we read a new line we are done reading the line. This
-            // will exit the loop after copying this buffer into the result.
+            // Since we read a new line we are done reading the line. This will
+            // exit the loop after copying this buffer into the result.
             keep_going = false;
         }
         if (!result) {
-            // Allocate the initial result and make room for the terminating '\0'
+            // Allocate the initial result and make room for the terminating
+            // '\0'.
             result = (char *)js_malloc(len + 1);
             if (!result)
                 return nullptr;
@@ -693,7 +694,7 @@ ArmDebugger::debug()
     char arg2[ARG_SIZE + 1];
     char *argv[3] = { cmd, arg1, arg2 };

-    // make sure to have a proper terminating character if reaching the limit
+    // Make sure to have a proper terminating character if reaching the limit.
     cmd[COMMAND_SIZE] = 0;
     arg1[ARG_SIZE] = 0;
     arg2[ARG_SIZE] = 0;
@@ -735,7 +736,8 @@ ArmDebugger::debug()
                 sim_->set_pc(sim_->get_pc() + 4);
                 sim_->icount_++;
             } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
-                // Execute the one instruction we broke at with breakpoints disabled.
+                // Execute the one instruction we broke at with breakpoints
+                // disabled.
                sim_->instructionDecode(reinterpret_cast<SimInstruction *>(sim_->get_pc()));
                sim_->icount_++;
                // Leave the debugger shell.
@@ -1076,7 +1078,7 @@ CheckICache(SimulatorRuntime::ICacheMap &i_cache, SimInstruction *instr)
                          cache_page->cachedData(offset),
                          SimInstruction::kInstrSize) == 0);
    } else {
-        // Cache miss.  Load memory into the cache.
+        // Cache miss. Load memory into the cache.
        memcpy(cached_line, line, CachePage::kLineLength);
        *cache_valid_byte = CachePage::LINE_VALID;
    }
@@ -1180,8 +1182,8 @@ Simulator::Simulator(SimulatorRuntime *srt)

 // When the generated code calls a VM function (masm.callWithABI) we need to
 // call that function instead of trying to execute it with the simulator
-// (because it's x86 code instead of arm code). We do that by redirecting the
-// VM call to a svc (Supervisor Call) instruction that is handled by the
+// (because it's x86 code instead of arm code). We do that by redirecting the VM
+// call to a svc (Supervisor Call) instruction that is handled by the
 // simulator. We write the original destination of the jump just at a known
 // offset from the svc instruction so the simulator knows what to call.
 class Redirection
@@ -1275,8 +1277,8 @@ Simulator::set_register(int reg, int32_t value)
     registers_[reg] = value;
 }

-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
+// Get the register from the architecture state. This function does handle the
+// special case of accessing the PC register.
 int32_t
 Simulator::get_register(int reg) const
 {
@@ -1291,8 +1293,8 @@ Simulator::get_double_from_register_pair(int reg)
 {
     MOZ_ASSERT(reg >= 0 && reg < num_registers && (reg % 2) == 0);

-    // Read the bits from the unsigned integer register_[] array
-    // into the double precision floating point value and return it.
+    // Read the bits from the unsigned integer register_[] array into the double
+    // precision floating point value and return it.
     double dm_val = 0.0;
     char buffer[2 * sizeof(vfp_registers_[0])];
     memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
@@ -1630,8 +1632,8 @@ Simulator::overRecursedWithExtra(uint32_t extra) const
     return newsp <= stackLimit();
 }

-// Checks if the current instruction should be executed based on its
-// condition bits.
+// Checks if the current instruction should be executed based on its condition
+// bits.
 bool
 Simulator::conditionallyExecute(SimInstruction *instr)
 {
@@ -1704,14 +1706,14 @@ Simulator::overflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addit
 {
     bool overflow;
     if (addition) {
-        // operands have the same sign
+        // Operands have the same sign.
         overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
-                   // and operands and result have different sign
+                   // And operands and result have different sign.
                    && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
     } else {
-        // operands have different signs
+        // Operands have different signs.
        overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
-                   // and first operand and result have different signs
+                   // And first operand and result have different signs.
                   && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
    }
    return overflow;
 }
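
overflowFrom has a compact XOR formulation that some readers may find easier to check (a sketch with an illustrative name, equivalent to the comparisons above):

#include <cstdint>

// Addition overflows iff the operands agree in sign and the result disagrees;
// subtraction overflows iff the operands disagree and the result's sign
// differs from the left operand's.
static bool OverflowFromModel(int32_t alu_out, int32_t left, int32_t right, bool addition)
{
    if (addition)
        return ((left ^ right) >= 0) && ((left ^ alu_out) < 0);
    return ((left ^ right) < 0) && ((left ^ alu_out) < 0);
}
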
@@ -2074,10 +2076,10 @@ typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);

 // Fill the volatile registers with scratch values.
 //
 // Some of the ABI calls assume that the float registers are not scratched, even
-// though the ABI defines them as volatile - a performance optimization.  These are
-// all calls passing operands in integer registers, so for now the simulator does not
-// scratch any float registers for these calls. Should try to narrow it further in
-// future.
+// though the ABI defines them as volatile - a performance optimization. These
+// are all calls passing operands in integer registers, so for now the simulator
+// does not scratch any float registers for these calls. Should try to narrow it
+// further in the future.
 //
 void
 Simulator::scratchVolatileRegisters(bool scratchFloat)
@@ -2087,8 +2089,8 @@ Simulator::scratchVolatileRegisters(bool scratchFloat)
     set_register(r1, scratch_value);
     set_register(r2, scratch_value);
     set_register(r3, scratch_value);
-    set_register(r12, scratch_value);  // Intra-Procedure-call scratch register
-    set_register(r14, scratch_value);  // Link register
+    set_register(r12, scratch_value);  // Intra-Procedure-call scratch register.
+    set_register(r14, scratch_value);  // Link register.

     if (scratchFloat) {
         uint64_t scratch_value_d = 0x5a5a5a5a5a5a5a5aLU ^ uint64_t(icount_) ^ (uint64_t(icount_) << 30);
@@ -2142,9 +2144,10 @@ Simulator::softwareInterrupt(SimInstruction *instr)
          case Args_General2: {
            Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
            int64_t result = target(arg0, arg1);
-            // The ARM backend makes calls to __aeabi_idivmod and __aeabi_uidivmod assuming
-            // that the float registers are non-volatile as a performance optimization, so the
-            // float registers must not be scratch when calling these.
+            // The ARM backend makes calls to __aeabi_idivmod and
+            // __aeabi_uidivmod assuming that the float registers are
+            // non-volatile as a performance optimization, so the float
+            // registers must not be scratch when calling these.
            bool scratchFloat = target != __aeabi_idivmod && target != __aeabi_uidivmod;
            scratchVolatileRegisters(/* scratchFloat = */ scratchFloat);
            setCallResult(result);
@@ -2306,8 +2309,8 @@ Simulator::softwareInterrupt(SimInstruction *instr)
            if (isWatchedStop(code))
                increaseStopCounter(code);

-            // Stop if it is enabled, otherwise go on jumping over the stop
-            // and the message address.
+            // Stop if it is enabled; otherwise go on jumping over the stop and
+            // the message address.
            if (isEnabledStop(code)) {
                ArmDebugger dbg(this);
                dbg.stop(instr);
@@ -2404,8 +2407,8 @@ Simulator::printStopInfo(uint32_t code)
    }
 }

-// Instruction types 0 and 1 are both rolled into one function because they
-// only differ in the handling of the shifter_operand.
+// Instruction types 0 and 1 are both rolled into one function because they only
+// differ in the handling of the shifter_operand.
 void
 Simulator::decodeType01(SimInstruction *instr)
 {
@@ -2423,9 +2426,9 @@ Simulator::decodeType01(SimInstruction *instr)
            int32_t rm_val = get_register(rm);
            if (instr->bit(23) == 0) {
                if (instr->bit(21) == 0) {
-                    // The MUL instruction description (A 4.1.33) refers to Rd as being
-                    // the destination for the operation, but it confusingly uses the
-                    // Rn field to encode it.
+                    // The MUL instruction description (A 4.1.33) refers to
+                    // Rd as being the destination for the operation, but it
+                    // confusingly uses the Rn field to encode it.
                    int rd = rn;  // Remap the rn field to the Rd register.
                    int32_t alu_out = rm_val * rs_val;
                    set_register(rd, alu_out);
@@ -2435,9 +2438,10 @@ Simulator::decodeType01(SimInstruction *instr)
                    int rd = instr->rdValue();
                    int32_t acc_value = get_register(rd);
                    if (instr->bit(22) == 0) {
-                        // The MLA instruction description (A 4.1.28) refers to the order
-                        // of registers as "Rd, Rm, Rs, Rn".
But confusingly it uses the - // Rn field to encode the Rd register and the Rd field to encode + // The MLA instruction description (A 4.1.28) refers + // to the order of registers as "Rd, Rm, Rs, + // Rn". But confusingly it uses the Rn field to + // encode the Rd register and the Rd field to encode // the Rn register. int32_t mul_out = rm_val * rs_val; int32_t result = acc_value + mul_out; @@ -2449,9 +2453,9 @@ } } } else { - // The signed/long multiply instructions use the terms RdHi and RdLo - // when referring to the target registers. They are mapped to the Rn - // and Rd fields as follows: + // The signed/long multiply instructions use the terms RdHi + // and RdLo when referring to the target registers. They are + // mapped to the Rn and Rd fields as follows: // RdLo == Rd // RdHi == Rn (This is confusingly stored in variable rd here // because the mul instruction from above uses the @@ -2469,7 +2473,7 @@ hi_res = static_cast<int32_t>(result >> 32); lo_res = static_cast<int32_t>(result & 0xffffffff); } else { - // unsigned multiply + // Unsigned multiply. uint64_t left_op = static_cast<uint32_t>(rm_val); uint64_t right_op = static_cast<uint32_t>(rs_val); uint64_t result = left_op * right_op; @@ -2485,7 +2489,7 @@ MOZ_CRASH(); // Not used atm. } } else { - // extra load/store instructions + // Extra load/store instructions. int rd = instr->rdValue(); int rn = instr->rnValue(); int32_t rn_val = get_register(rn); @@ -2587,7 +2591,7 @@ } } } else { - // signed byte loads + // Signed byte loads. MOZ_ASSERT(instr->hasSign()); MOZ_ASSERT(instr->hasL()); int8_t val = readB(addr); @@ -2688,7 +2692,7 @@ } int32_t alu_out; switch (instr->opcodeField()) { - case op_and: + case OpAnd: alu_out = rn_val & shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2696,7 +2700,7 @@ setCFlag(shifter_carry_out); } break; - case op_eor: + case OpEor: alu_out = rn_val ^ shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2704,7 +2708,7 @@ setCFlag(shifter_carry_out); } break; - case op_sub: + case OpSub: alu_out = rn_val - shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2713,7 +2717,7 @@ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, false)); } break; - case op_rsb: + case OpRsb: alu_out = shifter_operand - rn_val; set_register(rd, alu_out); if (instr->hasS()) { @@ -2722,7 +2726,7 @@ setVFlag(overflowFrom(alu_out, shifter_operand, rn_val, false)); } break; - case op_add: + case OpAdd: alu_out = rn_val + shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2731,7 +2735,7 @@ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true)); } break; - case op_adc: + case OpAdc: alu_out = rn_val + shifter_operand + getCarry(); set_register(rd, alu_out); if (instr->hasS()) { @@ -2740,11 +2744,11 @@ setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true)); } break; - case op_sbc: - case op_rsc: + case OpSbc: + case OpRsc: MOZ_CRASH(); break; - case op_tst: + case OpTst: if (instr->hasS()) { alu_out = rn_val & shifter_operand; setNZFlags(alu_out); } else { @@ -2754,7
+2758,7 @@ Simulator::decodeType01(SimInstruction *instr) set_register(rd, alu_out); } break; - case op_teq: + case OpTeq: if (instr->hasS()) { alu_out = rn_val ^ shifter_operand; setNZFlags(alu_out); @@ -2765,7 +2769,7 @@ Simulator::decodeType01(SimInstruction *instr) MOZ_CRASH(); } break; - case op_cmp: + case OpCmp: if (instr->hasS()) { alu_out = rn_val - shifter_operand; setNZFlags(alu_out); @@ -2777,7 +2781,7 @@ Simulator::decodeType01(SimInstruction *instr) set_register(rd, alu_out); } break; - case op_cmn: + case OpCmn: if (instr->hasS()) { alu_out = rn_val + shifter_operand; setNZFlags(alu_out); @@ -2789,7 +2793,7 @@ Simulator::decodeType01(SimInstruction *instr) MOZ_CRASH(); } break; - case op_orr: + case OpOrr: alu_out = rn_val | shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2797,7 +2801,7 @@ Simulator::decodeType01(SimInstruction *instr) setCFlag(shifter_carry_out); } break; - case op_mov: + case OpMov: alu_out = shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2805,7 +2809,7 @@ Simulator::decodeType01(SimInstruction *instr) setCFlag(shifter_carry_out); } break; - case op_bic: + case OpBic: alu_out = rn_val & ~shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2813,7 +2817,7 @@ Simulator::decodeType01(SimInstruction *instr) setCFlag(shifter_carry_out); } break; - case op_mvn: + case OpMvn: alu_out = ~shifter_operand; set_register(rd, alu_out); if (instr->hasS()) { @@ -2942,9 +2946,9 @@ Simulator::decodeType3(SimInstruction *instr) else // ASR rm_val >>= shift; - // If saturation occurs, the Q flag should be set in the CPSR. - // There is no Q flag yet, and no instruction (MRS) to read the - // CPSR directly. + // If saturation occurs, the Q flag should be set in the + // CPSR. There is no Q flag yet, and no instruction (MRS) + // to read the CPSR directly. if (rm_val > sat_val) rm_val = sat_val; else if (rm_val < 0) @@ -3042,7 +3046,7 @@ Simulator::decodeType3(SimInstruction *instr) if (instr->bit(22) == 0x0 && instr->bit(20) == 0x1 && instr->bits(15,12) == 0x0f && instr->bits(7, 4) == 0x1) { if (!instr->hasW()) { - // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs + // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs. int rm = instr->rmValue(); int32_t rm_val = get_register(rm); int rs = instr->rsValue(); @@ -3056,7 +3060,7 @@ Simulator::decodeType3(SimInstruction *instr) set_register(rn, ret_val); return; } else { - // udiv (in V8 notation matching ARM ISA format) rn = rm/rs + // udiv (in V8 notation matching ARM ISA format) rn = rm/rs. int rm = instr->rmValue(); uint32_t rm_val = get_register(rm); int rs = instr->rsValue(); @@ -3149,7 +3153,8 @@ Simulator::decodeType3(SimInstruction *instr) void Simulator::decodeType4(SimInstruction *instr) { - MOZ_ASSERT(instr->bit(22) == 0); // Only allowed to be set in privileged mode. + // Only allowed to be set in privileged mode. + MOZ_ASSERT(instr->bit(22) == 0); bool load = instr->hasL(); handleRList(instr, load); } @@ -3194,7 +3199,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr) if (instr->bit(4) == 0) { if (instr->opc1Value() == 0x7) { - // Other data processing instructions + // Other data processing instructions. if ((instr->opc2Value() == 0x0) && (instr->opc3Value() == 0x1)) { // vmov register to register. 
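// Aside: the OpAdd/OpSub/OpCmp/OpCmn cases above all set the V flag through
// overflowFrom, which encodes the standard two's-complement rule: for
// addition, overflow happens exactly when the operands share a sign and the
// result's sign differs; for subtraction, when the operands differ in sign
// and the result's sign differs from the first operand's. A minimal
// standalone sketch of that rule, independent of the simulator (the
// constants in main() are illustrative only):

#include <cassert>
#include <cstdint>

static bool overflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addition)
{
    if (addition) {
        return ((left >= 0 && right >= 0) || (left < 0 && right < 0)) &&
               ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
    }
    return ((left < 0 && right >= 0) || (left >= 0 && right < 0)) &&
           ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
}

int main()
{
    assert(overflowFrom(INT32_MIN, INT32_MAX, 1, true));  // 0x7fffffff + 1 wraps: V set.
    assert(!overflowFrom(3, 1, 2, true));                 // Small sums never overflow.
    assert(overflowFrom(INT32_MAX, INT32_MIN, 1, false)); // 0x80000000 - 1 wraps: V set.
    return 0;
}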
if (instr->szValue() == 0x1) { @@ -3238,7 +3243,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr) decodeVCVTBetweenFloatingPointAndInteger(instr); } else if ((instr->opc2Value() == 0xA) && (instr->opc3Value() == 0x3) && (instr->bit(8) == 1)) { - // vcvt.f64.s32 Dd, Dd, # + // vcvt.f64.s32 Dd, Dd, #. int fraction_bits = 32 - ((instr->bits(3, 0) << 1) | instr->bit(5)); int fixed_value = get_sinteger_from_s_register(vd * 2); double divide = 1 << fraction_bits; @@ -3267,7 +3272,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr) if (instr->szValue() == 0x1) { set_d_register_from_double(vd, instr->doubleImmedVmov()); } else { - // vmov.f32 immediate + // vmov.f32 immediate. set_s_register_from_float(vd, instr->float32ImmedVmov()); } } else { @@ -3333,8 +3338,8 @@ Simulator::decodeTypeVFP(SimInstruction *instr) const double dn_val = get_double_from_d_register(vn); const double dm_val = get_double_from_d_register(vm); - // Note: we do the mul and add/sub in separate steps to avoid getting a - // result with too high precision. + // Note: we do the mul and add/sub in separate steps to avoid + // getting a result with too high precision. set_d_register_from_double(vd, dn_val * dm_val); if (is_vmls) { set_d_register_from_double(vd, @@ -3369,7 +3374,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr) } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1) && (instr->bit(23) == 0x0)) { - // vmov (ARM core register to scalar) + // vmov (ARM core register to scalar). int vd = instr->bits(19, 16) | (instr->bit(7) << 4); double dd_value = get_double_from_d_register(vd); int32_t data[2]; @@ -3380,7 +3385,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr) } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1) && (instr->bit(23) == 0x0)) { - // vmov (scalar to ARM core register) + // vmov (scalar to ARM core register). int vn = instr->bits(19, 16) | (instr->bit(7) << 4); double dn_value = get_double_from_d_register(vn); int32_t data[2]; @@ -3539,8 +3544,8 @@ get_inv_op_vfp_flag(VFPRoundingMode mode, double val, bool unsigned_) if (val != val) return true; - // Check for overflow. This code works because 32bit integers can be - // exactly represented by ieee-754 64bit floating-point values. + // Check for overflow. This code works because 32bit integers can be exactly + // represented by ieee-754 64bit floating-point values. switch (mode) { case SimRN: return unsigned_ ? (val >= (max_uint + 0.5)) || @@ -3592,15 +3597,14 @@ Simulator::decodeVCVTBetweenFloatingPointAndInteger(SimInstruction *instr) // We are playing with code close to the C++ standard's limits below, // hence the very simple code and heavy checks. // - // Note: - // C++ defines default type casting from floating point to integer as - // (close to) rounding toward zero ("fractional part discarded"). + // Note: C++ defines default type casting from floating point to integer + // as (close to) rounding toward zero ("fractional part discarded"). int dst = instr->VFPDRegValue(kSinglePrecision); int src = instr->VFPMRegValue(src_precision); - // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding - // mode or the default Round to Zero mode. + // Bit 7 in vcvt instructions indicates if we should use the FPSCR + // rounding mode or the default Round to Zero mode. VFPRoundingMode mode = (instr->bit(7) != 1) ? 
FPSCR_rounding_mode_ : SimRZ; MOZ_ASSERT(mode == SimRM || mode == SimRZ || mode == SimRN); @@ -3696,8 +3700,8 @@ Simulator::decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction *instr) // We are playing with code close to the C++ standard's limits below, // hence the very simple code and heavy checks. // - // Note: C++ defines default type casting from floating point to integer as - // (close to) rounding toward zero ("fractional part discarded"). + // Note: C++ defines default type casting from floating point to integer + // as (close to) rounding toward zero ("fractional part discarded"). int dst = instr->VFPDRegValue(precision); @@ -3711,8 +3715,8 @@ Simulator::decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction *instr) // Scale value by specified number of fraction bits. val *= mult; - // Rounding down towards zero. No need to account for the rounding error as this - // instruction always rounds down towards zero. See SimRZ below. + // Rounding down towards zero. No need to account for the rounding error + // as this instruction always rounds down towards zero. See SimRZ below. int temp = unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val); inv_op_vfp_flag_ = get_inv_op_vfp_flag(SimRZ, val, unsigned_integer); @@ -3877,7 +3881,7 @@ Simulator::decodeSpecialCondition(SimInstruction *instr) break; case 7: if (instr->bits(18, 16) == 0 && instr->bits(11, 6) == 0x28 && instr->bit(4) == 1) { - // vmovl unsigned + // vmovl unsigned. int Vd = (instr->bit(22) << 4) | instr->vdValue(); int Vm = (instr->bit(5) << 4) | instr->vmValue(); int imm3 = instr->bits(21, 19); @@ -4038,8 +4042,8 @@ Simulator::instructionDecode(SimInstruction *instr) MOZ_CRASH(); break; } - // If the instruction is a non taken conditional stop, we need to skip the - // inlined message address. + // If the instruction is a non taken conditional stop, we need to skip + // the inlined message address. } else if (instr->isStop()) { set_pc(get_pc() + 2 * SimInstruction::kInstrSize); } @@ -4052,8 +4056,8 @@ template <bool EnableStopSimAt> void Simulator::execute() { - // Get the PC to simulate. Cannot use the accessor here as we need the - // raw PC value and not the one used as input to arithmetic instructions. + // Get the PC to simulate. Cannot use the accessor here as we need the raw + // PC value and not the one used as input to arithmetic instructions. int program_counter = get_pc(); AsmJSActivation *activation = TlsPerThreadData.get()->asmJSActivationStackFromOwnerThread(); @@ -4090,9 +4094,9 @@ Simulator::callInternal(uint8_t *entry) // the LR the simulation stops when returning to this call point. set_register(lr, end_sim_pc); - // Remember the values of callee-saved registers. - // The code below assumes that r9 is not used as sb (static base) in - // simulator code and therefore is regarded as a callee-saved register. + // Remember the values of callee-saved registers. The code below assumes + // that r9 is not used as sb (static base) in simulator code and therefore + // is regarded as a callee-saved register. int32_t r4_val = get_register(r4); int32_t r5_val = get_register(r5); int32_t r6_val = get_register(r6); @@ -4142,7 +4146,7 @@ Simulator::callInternal(uint8_t *entry) set_d_register(d14, &callee_saved_value_d); set_d_register(d15, &callee_saved_value_d); - // Start the simulation + // Start the simulation.
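// Aside: the fraction-bits vcvt path above reduces to scale-and-truncate. A
// minimal sketch of just that step, assuming round-toward-zero (SimRZ) and
// omitting the saturation and invalid-operation checks the simulator also
// performs; toFixedRZ is an illustrative name, not a simulator API:

#include <cassert>
#include <cstdint>

static int32_t toFixedRZ(double val, int fraction_bits)
{
    // Scale by 2^fraction_bits, then let the float-to-int conversion
    // truncate, which is C++'s (close to) round-toward-zero behavior.
    double mult = double(int64_t(1) << fraction_bits);
    return static_cast<int32_t>(val * mult);
}

int main()
{
    assert(toFixedRZ(1.5, 4) == 24);   // 1.5 in Q4 fixed point is 24/16.
    assert(toFixedRZ(-1.99, 0) == -1); // Truncation rounds toward zero.
    return 0;
}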
if (Simulator::StopSimAt != -1L) execute<true>(); else diff --git a/js/src/jit/arm/Simulator-arm.h b/js/src/jit/arm/Simulator-arm.h index 2d35bd4ef451..d15c048c3c42 100644 --- a/js/src/jit/arm/Simulator-arm.h +++ b/js/src/jit/arm/Simulator-arm.h @@ -172,7 +172,7 @@ class Simulator // Known bad pc value to ensure that the simulator does not execute // without being properly setup. bad_lr = -1, - // A pc value used to signal the simulator to stop execution.  Generally + // A pc value used to signal the simulator to stop execution. Generally // the lr is set to this value on transition from native C code to // simulated execution, so that the simulator can "return" to the native // C code. diff --git a/js/src/jit/arm/Trampoline-arm.cpp b/js/src/jit/arm/Trampoline-arm.cpp index 6147c5f21bad..89d52ef869a1 100644 --- a/js/src/jit/arm/Trampoline-arm.cpp +++ b/js/src/jit/arm/Trampoline-arm.cpp @@ -37,7 +37,7 @@ static const FloatRegisterSet NonVolatileFloatRegs = static void GenerateReturn(MacroAssembler &masm, int returnCode, SPSProfiler *prof) { - // Restore non-volatile floating point registers + // Restore non-volatile floating point registers. masm.transferMultipleByRuns(NonVolatileFloatRegs, IsLoad, StackPointer, IA); // Unwind the sps mark. @@ -59,7 +59,7 @@ GenerateReturn(MacroAssembler &masm, int returnCode, SPSProfiler *prof) // r12 isn't saved, so it shouldn't be restored. masm.transferReg(pc); masm.finishDataTransfer(); - masm.dumpPool(); + masm.flushBuffer(); } struct EnterJITStack @@ -75,7 +75,7 @@ struct EnterJITStack size_t hasSPSMark; - // non-volatile registers. + // Non-volatile registers. void *r4; void *r5; void *r6; @@ -133,7 +133,7 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) // The 5th argument is located at [sp, 36] masm.finishDataTransfer(); - // Push the EnterJIT sps mark.  "Frame pointer" = start of saved core regs. + // Push the EnterJIT sps mark. "Frame pointer" = start of saved core regs. masm.movePtr(sp, r8); masm.spsMarkJit(&cx->runtime()->spsProfiler, r8, r9); @@ -154,26 +154,27 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) masm.loadPtr(slot_vp, r10); masm.unboxInt32(Address(r10, 0), r10); - // Subtract off the size of the arguments from the stack pointer, store elsewhere + // Subtract off the size of the arguments from the stack pointer, store + // elsewhere. aasm->as_sub(r4, sp, O2RegImmShift(r1, LSL, 3)); //r4 = sp - argc*8 - // Get the final position of the stack pointer into the stack pointer + // Get the final position of the stack pointer into the stack pointer. aasm->as_sub(sp, r4, Imm8(16)); // sp' = sp - argc*8 - 16 - // Get a copy of the number of args to use as a decrement counter, also - // Set the zero condition code + // Get a copy of the number of args to use as a decrement counter, also set + // the zero condition code. aasm->as_mov(r5, O2Reg(r1), SetCond); // Loop over arguments, copying them from an unknown buffer onto the Ion // stack so they can be accessed from JIT'ed code. { Label header, footer; - // If there aren't any arguments, don't do anything + // If there aren't any arguments, don't do anything. aasm->as_b(&footer, Assembler::Zero); - // Get the top of the loop + // Get the top of the loop. masm.bind(&header); aasm->as_sub(r5, r5, Imm8(1), SetCond); // We could be more awesome, and unroll this, using a loadm - // (particularly since the offset is effectively 0) - // but that seems more error prone, and complex.
+ // (particularly since the offset is effectively 0) but that seems more + // error prone, and complex. // BIG FAT WARNING: this loads both r6 and r7. aasm->as_extdtr(IsLoad, 64, true, PostIndex, r6, EDtrAddr(r2, EDtrOffImm(8))); aasm->as_extdtr(IsStore, 64, true, PostIndex, r6, EDtrAddr(r4, EDtrOffImm(8))); @@ -211,9 +212,9 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) masm.load32(slot_numStackValues, numStackValues); // Write return address. On ARM, CodeLabel is only used for tableswitch, - // so we can't use it here to get the return address. Instead, we use - // pc + a fixed offset to a jump to returnLabel. The pc register holds - // pc + 8, so we add the size of 2 instructions to skip the instructions + // so we can't use it here to get the return address. Instead, we use pc + // + a fixed offset to a jump to returnLabel. The pc register holds pc + + // 8, so we add the size of 2 instructions to skip the instructions // emitted by storePtr and jump(&skipJump). { AutoForbidPools afp(&masm); @@ -235,7 +236,8 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) masm.mov(sp, framePtr); #ifdef XP_WIN - // Can't push large frames blindly on windows. Touch frame memory incrementally. + // Can't push large frames blindly on windows. Touch frame memory + // incrementally. masm.ma_lsl(Imm32(3), numStackValues, scratch); masm.subPtr(scratch, framePtr); { @@ -287,7 +289,7 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) masm.jump(jitcode); - // OOM: load error value, discard return address and previous frame + // OOM: Load error value, discard return address and previous frame // pointer and return. masm.bind(&error); masm.mov(framePtr, sp); @@ -309,12 +311,12 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) masm.bind(&returnLabel); } - // The top of the stack now points to the address of the field following - // the return address because the return address is popped for the - // return, so we need to remove the size of the return address field. + // The top of the stack now points to the address of the field following the + // return address because the return address is popped for the return, so we + // need to remove the size of the return address field. aasm->as_sub(sp, sp, Imm8(4)); - // Load off of the stack the size of our local stack + // Load off of the stack the size of our local stack. masm.loadPtr(Address(sp, IonJSFrameLayout::offsetOfDescriptor()), r5); aasm->as_add(sp, sp, lsr(r5, FRAMESIZE_SHIFT)); @@ -323,9 +325,8 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type) masm.storeValue(JSReturnOperand, Address(r5, 0)); // :TODO: Optimize storeValue with: - // We're using a load-double here. In order for that to work, - // the data needs to be stored in two consecutive registers, - // make sure this is the case + // We're using a load-double here. In order for that to work, the data needs + // to be stored in two consecutive registers, make sure this is the case // JS_ASSERT(JSReturnReg_Type.code() == JSReturnReg_Data.code()+1); // aasm->as_extdtr(IsStore, 64, true, Offset, // JSReturnReg_Data, EDtrAddr(r5, EDtrOffImm(0))); @@ -350,16 +351,16 @@ JitRuntime::generateInvalidator(JSContext *cx) // See large comment in x86's JitRuntime::generateInvalidator. MacroAssembler masm(cx); //masm.as_bkpt(); - // At this point, one of two things has happened. 
+ // At this point, one of two things has happened: // 1) Execution has just returned from C code, which left the stack aligned // 2) Execution has just returned from Ion code, which left the stack unaligned. - // The old return address should not matter, but we still want the - // stack to be aligned, and there is no good reason to automatically align it with - // a call to setupUnalignedABICall. + // The old return address should not matter, but we still want the stack to + // be aligned, and there is no good reason to automatically align it with a + // call to setupUnalignedABICall. masm.ma_and(Imm32(~7), sp, sp); masm.startDataTransferM(IsStore, sp, DB, WriteBack); // We don't have to push everything, but this is likely easier. - // setting regs_ + // Setting regs_. for (uint32_t i = 0; i < Registers::Total; i++) masm.transferReg(Register::FromCode(i)); masm.finishDataTransfer(); @@ -385,10 +386,11 @@ JitRuntime::generateInvalidator(JSContext *cx) masm.ma_ldr(Address(sp, 0), r2); masm.ma_ldr(Address(sp, sizeOfBailoutInfo), r1); // Remove the return address, the IonScript, the register state - // (InvaliationBailoutStack) and the space that was allocated for the return value + // (InvalidationBailoutStack) and the space that was allocated for the return + // value. masm.ma_add(sp, Imm32(sizeof(InvalidationBailoutStack) + sizeOfRetval + sizeOfBailoutInfo), sp); - // remove the space that this frame was using before the bailout - // (computed by InvalidationBailout) + // Remove the space that this frame was using before the bailout (computed + // by InvalidationBailout). masm.ma_add(sp, r1, sp); // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2. @@ -415,7 +417,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void * // Including |this|, there are (|nargs| + 1) arguments to copy. JS_ASSERT(ArgumentsRectifierReg == r8); - // Copy number of actual arguments into r0 + // Copy number of actual arguments into r0. masm.ma_ldr(DTRAddr(sp, DtrOffImm(IonRectifierFrameLayout::offsetOfNumActualArgs())), r0); // Load the number of |undefined|s to push into r6. @@ -441,7 +443,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void * // Get the topmost argument. - masm.ma_alu(r3, lsl(r8, 3), r3, op_add); // r3 <- r3 + nargs * 8 + masm.ma_alu(r3, lsl(r8, 3), r3, OpAdd); // r3 <- r3 + nargs * 8 masm.ma_add(r3, Imm32(sizeof(IonRectifierFrameLayout)), r3); // Push arguments, |nargs| + 1 times (to include |this|). @@ -495,7 +497,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void * // return address // Discard pushed arguments. - masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, op_add); + masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, OpAdd); masm.ret(); Linker linker(masm); @@ -524,12 +526,12 @@ PushBailoutFrame(MacroAssembler &masm, uint32_t frameClass, Register spArg) // bailoutFrame.snapshotOffset // bailoutFrame.frameSize - // STEP 1a: save our register sets to the stack so Bailout() can - // read everything. + // STEP 1a: Save our register sets to the stack so Bailout() can read + // everything. // sp % 8 == 0 masm.startDataTransferM(IsStore, sp, DB, WriteBack); // We don't have to push everything, but this is likely easier. - // setting regs_ + // Setting regs_.
for (uint32_t i = 0; i < Registers::Total; i++) masm.transferReg(Register::FromCode(i)); masm.finishDataTransfer(); @@ -539,24 +541,24 @@ PushBailoutFrame(MacroAssembler &masm, uint32_t frameClass, Register spArg) masm.transferFloatReg(FloatRegister::FromCode(i)); masm.finishFloatTransfer(); - // STEP 1b: Push both the "return address" of the function call (the - // address of the instruction after the call that we used to get - // here) as well as the callee token onto the stack. The return - // address is currently in r14. We will proceed by loading the - // callee token into a sacrificial register <= r14, then pushing - // both onto the stack + // STEP 1b: Push both the "return address" of the function call (the address + // of the instruction after the call that we used to get here) as + // well as the callee token onto the stack. The return address is + // currently in r14. We will proceed by loading the callee token + // into a sacrificial register <= r14, then pushing both onto the + // stack. - // now place the frameClass onto the stack, via a register + // Now place the frameClass onto the stack, via a register. masm.ma_mov(Imm32(frameClass), r4); - // And onto the stack. Since the stack is full, we need to put this - // one past the end of the current stack. Sadly, the ABI says that we need - // to always point to the lowest place that has been written. the OS is - // free to do whatever it wants below sp. + // And onto the stack. Since the stack is full, we need to put this one past + // the end of the current stack. Sadly, the ABI says that we need to always + // point to the lowest place that has been written. The OS is free to do + // whatever it wants below sp. masm.startDataTransferM(IsStore, sp, DB, WriteBack); - // set frameClassId_ + // Set frameClassId_. masm.transferReg(r4); - // Set tableOffset_; higher registers are stored at higher locations on - // the stack. + // Set tableOffset_; higher registers are stored at higher locations on the + // stack. masm.transferReg(lr); masm.finishDataTransfer(); @@ -570,17 +572,16 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass) // SP % 8 == 4 // STEP 1c: Call the bailout function, giving a pointer to the - // structure we just blitted onto the stack + // structure we just blitted onto the stack. const int sizeOfBailoutInfo = sizeof(void *)*2; masm.reserveStack(sizeOfBailoutInfo); masm.mov(sp, r1); masm.setupAlignedABICall(2); - // Decrement sp by another 4, so we keep alignment - // Not Anymore! pushing both the snapshotoffset as well as the - // masm.as_sub(sp, sp, Imm8(4)); + // Decrement sp by another 4, so we keep alignment. Not Anymore! Pushing + // both the snapshotoffset as well as the: masm.as_sub(sp, sp, Imm8(4)); - // Set the old (4-byte aligned) value of the sp as the first argument + // Set the old (4-byte aligned) value of the sp as the first argument. masm.passABIArg(r0); masm.passABIArg(r1); @@ -594,11 +595,11 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass) sizeof(void *) * Registers::Total; if (frameClass == NO_FRAME_SIZE_CLASS_ID) { - // Make sure the bailout frame size fits into the offset for a load + // Make sure the bailout frame size fits into the offset for a load. 
masm.as_dtr(IsLoad, 32, Offset, r4, DTRAddr(sp, DtrOffImm(4))); - // used to be: offsetof(BailoutStack, frameSize_) - // this structure is no longer available to us :( + // Used to be: offsetof(BailoutStack, frameSize_) + // This structure is no longer available to us :( // We add 12 to the bailoutFrameSize because: // sizeof(uint32_t) for the tableOffset that was pushed onto the stack // sizeof(uintptr_t) for the snapshotOffset; @@ -607,9 +608,14 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass) masm.as_add(sp, sp, O2Reg(r4)); } else { uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize(); - masm.ma_add(Imm32(frameSize // the frame that was added when we entered the most recent function - + sizeof(void*) // the size of the "return address" that was dumped on the stack - + bailoutFrameSize) // everything else that was pushed on the stack + masm.ma_add(Imm32(// The frame that was added when we entered the most + // recent function. + frameSize + // The size of the "return address" that was dumped on + // the stack. + + sizeof(void*) + // Everything else that was pushed on the stack. + + bailoutFrameSize) , sp); } @@ -621,14 +627,15 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass) static void GenerateParallelBailoutThunk(MacroAssembler &masm, uint32_t frameClass) { - // As GenerateBailoutThunk, except we return an error immediately. We do - // the bailout dance so that we can walk the stack and have accurate - // reporting of frame information. + // As GenerateBailoutThunk, except we return an error immediately. We do the + // bailout dance so that we can walk the stack and have accurate reporting + // of frame information. PushBailoutFrame(masm, frameClass, r0); // Parallel bailout is like parallel failure in that we unwind all the way - // to the entry frame. Reserve space for the frame pointer of the entry frame. + // to the entry frame. Reserve space for the frame pointer of the entry + // frame. const int sizeOfEntryFramePointer = sizeof(uint8_t *) * 2; masm.reserveStack(sizeOfEntryFramePointer); masm.mov(sp, r1); @@ -786,8 +793,8 @@ JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f) argDisp += sizeof(void *); break; case VMFunction::DoubleByValue: - // Values should be passed by reference, not by value, so we - // assert that the argument is a double-precision float. + // Values should be passed by reference, not by value, so we assert + // that the argument is a double-precision float. JS_ASSERT(f.argPassedInFloatReg(explicitArg)); masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE); argDisp += sizeof(double); @@ -935,9 +942,9 @@ JitRuntime::generateDebugTrapHandler(JSContext *cx) masm.mov(r11, scratch1); masm.subPtr(Imm32(BaselineFrame::Size()), scratch1); - // Enter a stub frame and call the HandleDebugTrap VM function. Ensure - // the stub frame has a nullptr ICStub pointer, since this pointer is - // marked during GC. + // Enter a stub frame and call the HandleDebugTrap VM function. Ensure the + // stub frame has a nullptr ICStub pointer, since this pointer is marked + // during GC. masm.movePtr(ImmPtr(nullptr), BaselineStubReg); EmitEnterStubFrame(masm, scratch2); @@ -951,9 +958,9 @@ JitRuntime::generateDebugTrapHandler(JSContext *cx) EmitLeaveStubFrame(masm); - // If the stub returns |true|, we have to perform a forced return - // (return from the JS frame). If the stub returns |false|, just return - // from the trap stub so that execution continues at the current pc. 
+ // If the stub returns |true|, we have to perform a forced return (return + // from the JS frame). If the stub returns |false|, just return from the + // trap stub so that execution continues at the current pc. Label forcedReturn; masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn); masm.mov(lr, pc); diff --git a/js/src/jit/mips/Assembler-mips.cpp b/js/src/jit/mips/Assembler-mips.cpp index 78d9700a5826..a3883e9b1333 100644 --- a/js/src/jit/mips/Assembler-mips.cpp +++ b/js/src/jit/mips/Assembler-mips.cpp @@ -155,7 +155,7 @@ jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label) Instruction *inst1 = (Instruction *)jump_.raw(); Instruction *inst2 = inst1->next(); - Assembler::updateLuiOriValue(inst1, inst2, (uint32_t)label.raw()); + Assembler::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw()); AutoFlushICache::flush(uintptr_t(inst1), 8); } @@ -177,8 +177,8 @@ Assembler::executableCopy(uint8_t *buffer) for (size_t i = 0; i < longJumps_.length(); i++) { Instruction *inst1 = (Instruction *) ((uint32_t)buffer + longJumps_[i]); - uint32_t value = extractLuiOriValue(inst1, inst1->next()); - updateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value); + uint32_t value = ExtractLuiOriValue(inst1, inst1->next()); + UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value); } AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size()); @@ -226,16 +226,16 @@ class RelocationIterator }; uintptr_t -Assembler::getPointer(uint8_t *instPtr) +Assembler::GetPointer(uint8_t *instPtr) { Instruction *inst = (Instruction*)instPtr; - return Assembler::extractLuiOriValue(inst, inst->next()); + return Assembler::ExtractLuiOriValue(inst, inst->next()); } static JitCode * CodeFromJump(Instruction *jump) { - uint8_t *target = (uint8_t *)Assembler::extractLuiOriValue(jump, jump->next()); + uint8_t *target = (uint8_t *)Assembler::ExtractLuiOriValue(jump, jump->next()); return JitCode::FromExecutable(target); } @@ -255,7 +255,7 @@ TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader while (reader.more()) { size_t offset = reader.readUnsigned(); Instruction *inst = (Instruction*)(buffer + offset); - void *ptr = (void *)Assembler::extractLuiOriValue(inst, inst->next()); + void *ptr = (void *)Assembler::ExtractLuiOriValue(inst, inst->next()); // No barrier needed since these are constants. gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr"); @@ -269,7 +269,7 @@ TraceDataRelocations(JSTracer *trc, MIPSBuffer *buffer, CompactBufferReader &rea BufferOffset bo (reader.readUnsigned()); MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer); - void *ptr = (void *)Assembler::extractLuiOriValue(iter.cur(), iter.next()); + void *ptr = (void *)Assembler::ExtractLuiOriValue(iter.cur(), iter.next()); // No barrier needed since these are constants.
gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr"); @@ -330,9 +330,9 @@ Assembler::processCodeLabels(uint8_t *rawCode) } int32_t -Assembler::extractCodeLabelOffset(uint8_t *code) { +Assembler::ExtractCodeLabelOffset(uint8_t *code) { InstImm *inst = (InstImm *)code; - return Assembler::extractLuiOriValue(inst, inst->next()); + return Assembler::ExtractLuiOriValue(inst, inst->next()); } void @@ -342,8 +342,8 @@ Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address) int32_t src = label->offset(); do { Instruction *inst = (Instruction *) (rawCode + src); - uint32_t next = Assembler::extractLuiOriValue(inst, inst->next()); - Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)address); + uint32_t next = Assembler::ExtractLuiOriValue(inst, inst->next()); + Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address); src = next; } while (src != AbsoluteLabel::INVALID_OFFSET); } @@ -490,12 +490,12 @@ Assembler::writeInst(uint32_t x, uint32_t *dest) if (dest == nullptr) return m_buffer.putInt(x); - writeInstStatic(x, dest); + WriteInstStatic(x, dest); return BufferOffset(); } void -Assembler::writeInstStatic(uint32_t x, uint32_t *dest) +Assembler::WriteInstStatic(uint32_t x, uint32_t *dest) { MOZ_ASSERT(dest != nullptr); *dest = x; @@ -557,21 +557,21 @@ Assembler::as_nor(Register rd, Register rs, Register rt) BufferOffset Assembler::as_andi(Register rd, Register rs, int32_t j) { - MOZ_ASSERT(Imm16::isInUnsignedRange(j)); + MOZ_ASSERT(Imm16::IsInUnsignedRange(j)); return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode()); } BufferOffset Assembler::as_ori(Register rd, Register rs, int32_t j) { - MOZ_ASSERT(Imm16::isInUnsignedRange(j)); + MOZ_ASSERT(Imm16::IsInUnsignedRange(j)); return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode()); } BufferOffset Assembler::as_xori(Register rd, Register rs, int32_t j) { - MOZ_ASSERT(Imm16::isInUnsignedRange(j)); + MOZ_ASSERT(Imm16::IsInUnsignedRange(j)); return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode()); } @@ -672,7 +672,7 @@ Assembler::as_addu(Register rd, Register rs, Register rt) BufferOffset Assembler::as_addiu(Register rd, Register rs, int32_t j) { - MOZ_ASSERT(Imm16::isInSignedRange(j)); + MOZ_ASSERT(Imm16::IsInSignedRange(j)); return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode()); } @@ -715,7 +715,7 @@ Assembler::as_mul(Register rd, Register rs, Register rt) BufferOffset Assembler::as_lui(Register rd, int32_t j) { - MOZ_ASSERT(Imm16::isInUnsignedRange(j)); + MOZ_ASSERT(Imm16::IsInUnsignedRange(j)); return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode()); } @@ -874,14 +874,14 @@ Assembler::as_sltu(Register rd, Register rs, Register rt) BufferOffset Assembler::as_slti(Register rd, Register rs, int32_t j) { - MOZ_ASSERT(Imm16::isInSignedRange(j)); + MOZ_ASSERT(Imm16::IsInSignedRange(j)); return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode()); } BufferOffset Assembler::as_sltiu(Register rd, Register rs, uint32_t j) { - MOZ_ASSERT(Imm16::isInUnsignedRange(j)); + MOZ_ASSERT(Imm16::IsInUnsignedRange(j)); return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode()); } @@ -943,28 +943,28 @@ Assembler::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size) BufferOffset Assembler::as_ld(FloatRegister fd, Register base, int32_t off) { - MOZ_ASSERT(Imm16::isInSignedRange(off)); + MOZ_ASSERT(Imm16::IsInSignedRange(off)); return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode()); } BufferOffset Assembler::as_sd(FloatRegister fd, Register base, int32_t off)
{ - MOZ_ASSERT(Imm16::isInSignedRange(off)); + MOZ_ASSERT(Imm16::IsInSignedRange(off)); return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode()); } BufferOffset Assembler::as_ls(FloatRegister fd, Register base, int32_t off) { - MOZ_ASSERT(Imm16::isInSignedRange(off)); + MOZ_ASSERT(Imm16::IsInSignedRange(off)); return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode()); } BufferOffset Assembler::as_ss(FloatRegister fd, Register base, int32_t off) { - MOZ_ASSERT(Imm16::isInSignedRange(off)); + MOZ_ASSERT(Imm16::IsInSignedRange(off)); return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode()); } @@ -1253,7 +1253,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target) // If encoded offset is 4, then the jump must be short if (BOffImm16(inst[0]).decode() == 4) { - MOZ_ASSERT(BOffImm16::isInRange(offset)); + MOZ_ASSERT(BOffImm16::IsInRange(offset)); inst[0].setBOffImm16(BOffImm16(offset)); inst[1].makeNop(); return; @@ -1263,13 +1263,13 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target) // address after the reserved block. if (inst[0].encode() == inst_bgezal.encode()) { addLongJump(BufferOffset(branch)); - writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target); + WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target); inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode(); // There is 1 nop after this. return; } - if (BOffImm16::isInRange(offset)) { + if (BOffImm16::IsInRange(offset)) { bool conditional = (inst[0].encode() != inst_bgezal.encode() && inst[0].encode() != inst_beq.encode()); @@ -1287,7 +1287,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target) if (inst[0].encode() == inst_beq.encode()) { // Handle long unconditional jump. addLongJump(BufferOffset(branch)); - writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target); + WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target); inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); // There is 1 nop after this. } else { @@ -1295,7 +1295,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target) inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void *))); // No need for a "nop" here because we can clobber scratch. addLongJump(BufferOffset(branch + sizeof(void *))); - writeLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target); + WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target); inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); // There is 1 nop after this. } @@ -1312,7 +1312,7 @@ Assembler::bind(RepatchLabel *label) Instruction *inst1 = editSrc(b); Instruction *inst2 = inst1->next(); - updateLuiOriValue(inst1, inst2, dest.getOffset()); + UpdateLuiOriValue(inst1, inst2, dest.getOffset()); } label->bind(dest.getOffset()); } @@ -1363,32 +1363,32 @@ Assembler::as_break(uint32_t code) } uint32_t -Assembler::patchWrite_NearCallSize() +Assembler::PatchWrite_NearCallSize() { return 4 * sizeof(uint32_t); } void -Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) +Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) { Instruction *inst = (Instruction *) start.raw(); uint8_t *dest = toCall.raw(); // Overwrite whatever instruction used to be here with a call. // Always use long jump for two reasons: - // - Jump has to be the same size because of patchWrite_NearCallSize. + // - Jump has to be the same size because of PatchWrite_NearCallSize. 
// - Return address has to be at the end of replaced block. // Short jump wouldn't be more efficient. - writeLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest); + WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest); inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr); inst[3] = InstNOP(); // Ensure everyone sees the code that was just written into memory. - AutoFlushICache::flush(uintptr_t(inst), patchWrite_NearCallSize()); + AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize()); } uint32_t -Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1) +Assembler::ExtractLuiOriValue(Instruction *inst0, Instruction *inst1) { InstImm *i0 = (InstImm *) inst0; InstImm *i1 = (InstImm *) inst1; @@ -1401,43 +1401,43 @@ Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1) } void -Assembler::updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value) +Assembler::UpdateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value) { MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); - ((InstImm *) inst0)->setImm16(Imm16::upper(Imm32(value))); - ((InstImm *) inst1)->setImm16(Imm16::lower(Imm32(value))); + ((InstImm *) inst0)->setImm16(Imm16::Upper(Imm32(value))); + ((InstImm *) inst1)->setImm16(Imm16::Lower(Imm32(value))); } void -Assembler::writeLuiOriInstructions(Instruction *inst0, Instruction *inst1, +Assembler::WriteLuiOriInstructions(Instruction *inst0, Instruction *inst1, Register reg, uint32_t value) { - *inst0 = InstImm(op_lui, zero, reg, Imm16::upper(Imm32(value))); - *inst1 = InstImm(op_ori, reg, reg, Imm16::lower(Imm32(value))); + *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value))); + *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value))); } void -Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, PatchedImmPtr expectedValue) { Instruction *inst = (Instruction *) label.raw(); // Extract old Value - DebugOnly<uint32_t> value = Assembler::extractLuiOriValue(&inst[0], &inst[1]); + DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]); MOZ_ASSERT(value == uint32_t(expectedValue.value)); // Replace with new value - Assembler::updateLuiOriValue(inst, inst->next(), uint32_t(newValue.value)); + Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value)); AutoFlushICache::flush(uintptr_t(inst), 8); } void -Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue) +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue) { - patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), + PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value)); } @@ -1447,7 +1447,7 @@ Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, Imm // be totally safe. Since that instruction will never be executed again, a // ICache flush should not be necessary void -Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) +Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) { // Raw is going to be the return address.
uint32_t *raw = (uint32_t*)label.raw(); @@ -1457,14 +1457,14 @@ Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) } void -Assembler::patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) +Assembler::PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) { InstImm *inst = (InstImm *)code; - Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)imm.value); + Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value); } uint8_t * -Assembler::nextInstruction(uint8_t *inst_, uint32_t *count) +Assembler::NextInstruction(uint8_t *inst_, uint32_t *count) { Instruction *inst = reinterpret_cast<Instruction *>(inst_); if (count != nullptr) @@ -1578,11 +1578,11 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) AutoFlushICache::flush(uintptr_t(i2), 4); } -void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst) +void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction *inst) { InstImm *i0 = (InstImm *) inst; InstImm *i1 = (InstImm *) i0->next(); // Replace with new value - Assembler::updateLuiOriValue(i0, i1, heapSize); + Assembler::UpdateLuiOriValue(i0, i1, heapSize); } diff --git a/js/src/jit/mips/Assembler-mips.h b/js/src/jit/mips/Assembler-mips.h index 786938281be0..9e1f268ba35d 100644 --- a/js/src/jit/mips/Assembler-mips.h +++ b/js/src/jit/mips/Assembler-mips.h @@ -438,9 +438,9 @@ class BOffImm16 : data ((offset - 4) >> 2 & Imm16Mask) { MOZ_ASSERT((offset & 0x3) == 0); - MOZ_ASSERT(isInRange(offset)); + MOZ_ASSERT(IsInRange(offset)); } - static bool isInRange(int offset) { + static bool IsInRange(int offset) { if ((offset - 4) < (INT16_MIN << 2)) return false; if ((offset - 4) > (INT16_MAX << 2)) @@ -479,9 +479,9 @@ class JOffImm26 : data ((offset - 4) >> 2 & Imm26Mask) { MOZ_ASSERT((offset & 0x3) == 0); - MOZ_ASSERT(isInRange(offset)); + MOZ_ASSERT(IsInRange(offset)); } - static bool isInRange(int offset) { + static bool IsInRange(int offset) { if ((offset - 4) < -536870912) return false; if ((offset - 4) > 536870908) @@ -518,16 +518,16 @@ class Imm16 uint32_t decodeUnsigned() { return value; } - static bool isInSignedRange(int32_t imm) { + static bool IsInSignedRange(int32_t imm) { return imm >= INT16_MIN && imm <= INT16_MAX; } - static bool isInUnsignedRange(uint32_t imm) { + static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT16_MAX ; } - static Imm16 lower (Imm32 imm) { + static Imm16 Lower (Imm32 imm) { return Imm16(imm.value & 0xffff); } - static Imm16 upper (Imm32 imm) { + static Imm16 Upper (Imm32 imm) { return Imm16((imm.value >> 16) & 0xffff); } }; @@ -749,7 +749,7 @@ class Assembler : public AssemblerShared } public: - static uintptr_t getPointer(uint8_t *); + static uintptr_t GetPointer(uint8_t *); bool oom() const; @@ -791,7 +791,7 @@ class Assembler : public AssemblerShared BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr); // A static variant for the cases where we don't want to have an assembler // object at all. Normally, you would use the dummy (nullptr) object.
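// Aside: the Imm16::Upper/Lower split above is the whole trick behind the
// lui/ori patching in this file. lui materializes the high 16 bits of a
// 32-bit constant, ori ORs in the low 16, and ExtractLuiOriValue reverses
// the split. A standalone sketch of the packing, with illustrative helper
// names rather than the assembler's API:

#include <cassert>
#include <cstdint>

static uint16_t upperHalf(uint32_t v) { return (v >> 16) & 0xffff; } // Imm16::Upper
static uint16_t lowerHalf(uint32_t v) { return v & 0xffff; }         // Imm16::Lower

static uint32_t extractLuiOri(uint16_t luiImm, uint16_t oriImm)
{
    // lui places its immediate in the high half-word; ori fills the low one.
    return (uint32_t(luiImm) << 16) | oriImm;
}

int main()
{
    uint32_t target = 0xdeadbeef;
    assert(upperHalf(target) == 0xdead);
    assert(lowerHalf(target) == 0xbeef);
    assert(extractLuiOri(upperHalf(target), lowerHalf(target)) == target);
    return 0;
}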
- static void writeInstStatic(uint32_t x, uint32_t *dest); + static void WriteInstStatic(uint32_t x, uint32_t *dest); public: BufferOffset align(int alignment); @@ -1012,37 +1012,37 @@ class Assembler : public AssemblerShared void flushBuffer() { } - static uint32_t patchWrite_NearCallSize(); - static uint32_t nopSize() { return 4; } + static uint32_t PatchWrite_NearCallSize(); + static uint32_t NopSize() { return 4; } - static uint32_t extractLuiOriValue(Instruction *inst0, Instruction *inst1); - static void updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value); - static void writeLuiOriInstructions(Instruction *inst, Instruction *inst1, + static uint32_t ExtractLuiOriValue(Instruction *inst0, Instruction *inst1); + static void UpdateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value); + static void WriteLuiOriInstructions(Instruction *inst, Instruction *inst1, Register reg, uint32_t value); - static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall); - static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall); + static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, PatchedImmPtr expectedValue); - static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, + static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue); - static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm); + static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm); - static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm); + static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm); - static uint32_t alignDoubleArg(uint32_t offset) { + static uint32_t AlignDoubleArg(uint32_t offset) { return (offset + 1U) &~ 1U; } - static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr); + static uint8_t *NextInstruction(uint8_t *instruction, uint32_t *count = nullptr); static void ToggleToJmp(CodeLocationLabel inst_); static void ToggleToCmp(CodeLocationLabel inst_); static void ToggleCall(CodeLocationLabel inst_, bool enabled); - static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst); + static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction *inst); void processCodeLabels(uint8_t *rawCode); - static int32_t extractCodeLabelOffset(uint8_t *code); + static int32_t ExtractCodeLabelOffset(uint8_t *code); bool bailed() { return m_buffer.bail(); diff --git a/js/src/jit/mips/CodeGenerator-mips.cpp b/js/src/jit/mips/CodeGenerator-mips.cpp index 27f9b8499df0..c6945a8c6bb0 100644 --- a/js/src/jit/mips/CodeGenerator-mips.cpp +++ b/js/src/jit/mips/CodeGenerator-mips.cpp @@ -1862,7 +1862,7 @@ CodeGeneratorMIPS::generateInvalidateEpilogue() // Ensure that there is enough space in the buffer for the OsiPoint // patching to occur. Otherwise, we could overwrite the invalidation // epilogue. 
- for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize()) + for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize()) masm.nop(); masm.bind(&invalidate_); diff --git a/js/src/jit/mips/MacroAssembler-mips.cpp b/js/src/jit/mips/MacroAssembler-mips.cpp index da8aedc174ec..91e6cf3b7c76 100644 --- a/js/src/jit/mips/MacroAssembler-mips.cpp +++ b/js/src/jit/mips/MacroAssembler-mips.cpp @@ -267,28 +267,28 @@ MacroAssemblerMIPS::ma_li(Register dest, AbsoluteLabel *label) void MacroAssemblerMIPS::ma_li(Register dest, Imm32 imm) { - if (Imm16::isInSignedRange(imm.value)) { + if (Imm16::IsInSignedRange(imm.value)) { as_addiu(dest, zero, imm.value); - } else if (Imm16::isInUnsignedRange(imm.value)) { - as_ori(dest, zero, Imm16::lower(imm).encode()); - } else if (Imm16::lower(imm).encode() == 0) { - as_lui(dest, Imm16::upper(imm).encode()); + } else if (Imm16::IsInUnsignedRange(imm.value)) { + as_ori(dest, zero, Imm16::Lower(imm).encode()); + } else if (Imm16::Lower(imm).encode() == 0) { + as_lui(dest, Imm16::Upper(imm).encode()); } else { - as_lui(dest, Imm16::upper(imm).encode()); - as_ori(dest, dest, Imm16::lower(imm).encode()); + as_lui(dest, Imm16::Upper(imm).encode()); + as_ori(dest, dest, Imm16::Lower(imm).encode()); } } // This method generates lui and ori instruction pair that can be modified by -// updateLuiOriValue, either during compilation (eg. Assembler::bind), or +// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or // during execution (eg. jit::PatchJump). void MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm) { m_buffer.ensureSpace(2 * sizeof(uint32_t)); - as_lui(dest, Imm16::upper(imm).encode()); - as_ori(dest, dest, Imm16::lower(imm).encode()); + as_lui(dest, Imm16::Upper(imm).encode()); + as_ori(dest, dest, Imm16::Lower(imm).encode()); } void @@ -392,7 +392,7 @@ MacroAssemblerMIPS::ma_and(Register rd, Imm32 imm) void MacroAssemblerMIPS::ma_and(Register rd, Register rs, Imm32 imm) { - if (Imm16::isInUnsignedRange(imm.value)) { + if (Imm16::IsInUnsignedRange(imm.value)) { as_andi(rd, rs, imm.value); } else { ma_li(ScratchRegister, imm); @@ -422,7 +422,7 @@ MacroAssemblerMIPS::ma_or(Register rd, Imm32 imm) void MacroAssemblerMIPS::ma_or(Register rd, Register rs, Imm32 imm) { - if (Imm16::isInUnsignedRange(imm.value)) { + if (Imm16::IsInUnsignedRange(imm.value)) { as_ori(rd, rs, imm.value); } else { ma_li(ScratchRegister, imm); @@ -452,7 +452,7 @@ MacroAssemblerMIPS::ma_xor(Register rd, Imm32 imm) void MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm) { - if (Imm16::isInUnsignedRange(imm.value)) { + if (Imm16::IsInUnsignedRange(imm.value)) { as_xori(rd, rs, imm.value); } else { ma_li(ScratchRegister, imm); @@ -466,7 +466,7 @@ MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm) void MacroAssemblerMIPS::ma_addu(Register rd, Register rs, Imm32 imm) { - if (Imm16::isInSignedRange(imm.value)) { + if (Imm16::IsInSignedRange(imm.value)) { as_addiu(rd, rs, imm.value); } else { ma_li(ScratchRegister, imm); @@ -507,7 +507,7 @@ MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Labe { // Check for signed range because of as_addiu // Check for unsigned range because of as_xori - if (Imm16::isInSignedRange(imm.value) && Imm16::isInUnsignedRange(imm.value)) { + if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) { Label goodAddition; as_addiu(rd, rs, imm.value); @@ -536,7 +536,7 @@ MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Register rt) void 
MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Imm32 imm) { - if (Imm16::isInSignedRange(-imm.value)) { + if (Imm16::IsInSignedRange(-imm.value)) { as_addiu(rd, rs, -imm.value); } else { ma_li(ScratchRegister, imm); @@ -701,7 +701,7 @@ MacroAssemblerMIPS::ma_load(Register dest, Address address, { int16_t encodedOffset; Register base; - if (!Imm16::isInSignedRange(address.offset)) { + if (!Imm16::IsInSignedRange(address.offset)) { ma_li(ScratchRegister, Imm32(address.offset)); as_addu(ScratchRegister, address.base, ScratchRegister); base = ScratchRegister; @@ -747,7 +747,7 @@ MacroAssemblerMIPS::ma_store(Register data, Address address, LoadStoreSize size, { int16_t encodedOffset; Register base; - if (!Imm16::isInSignedRange(address.offset)) { + if (!Imm16::IsInSignedRange(address.offset)) { ma_li(ScratchRegister, Imm32(address.offset)); as_addu(ScratchRegister, address.base, ScratchRegister); base = ScratchRegister; @@ -828,7 +828,7 @@ MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address) MOZ_ASSERT(address.base != ScratchRegister); ma_li(ScratchRegister, imm); - if (Imm16::isInSignedRange(address.offset)) { + if (Imm16::IsInSignedRange(address.offset)) { as_sw(ScratchRegister, address.base, Imm16(address.offset).encode()); } else { MOZ_ASSERT(address.base != SecondScratchReg); @@ -939,11 +939,11 @@ MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind if (label->bound()) { int32_t offset = label->offset() - m_buffer.nextOffset().getOffset(); - if (BOffImm16::isInRange(offset)) + if (BOffImm16::IsInRange(offset)) jumpKind = ShortJump; if (jumpKind == ShortJump) { - MOZ_ASSERT(BOffImm16::isInRange(offset)); + MOZ_ASSERT(BOffImm16::IsInRange(offset)); code.setBOffImm16(BOffImm16(offset)); writeInst(code.encode()); as_nop(); @@ -1358,7 +1358,7 @@ MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest) void MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address) { - if (Imm16::isInSignedRange(address.offset)) { + if (Imm16::IsInSignedRange(address.offset)) { as_ls(ft, address.base, Imm16(address.offset).encode()); } else { MOZ_ASSERT(address.base != ScratchRegister); @@ -1375,7 +1375,7 @@ MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address) // alignment. 
int32_t off2 = address.offset + TAG_OFFSET; - if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) { + if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) { as_ls(ft, address.base, Imm16(address.offset).encode()); as_ls(getOddPair(ft), address.base, Imm16(off2).encode()); } else { @@ -1390,7 +1390,7 @@ void MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address) { int32_t off2 = address.offset + TAG_OFFSET; - if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) { + if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) { as_ss(ft, address.base, Imm16(address.offset).encode()); as_ss(getOddPair(ft), address.base, Imm16(off2).encode()); } else { @@ -1411,7 +1411,7 @@ MacroAssemblerMIPS::ma_sd(FloatRegister ft, BaseIndex address) void MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address) { - if (Imm16::isInSignedRange(address.offset)) { + if (Imm16::IsInSignedRange(address.offset)) { as_ss(ft, address.base, Imm16(address.offset).encode()); } else { ma_li(ScratchRegister, Imm32(address.offset)); @@ -2845,7 +2845,7 @@ MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex d // Make sure that ma_sw doesn't clobber ScratchRegister int32_t offset = dest.offset; - if (!Imm16::isInSignedRange(offset)) { + if (!Imm16::IsInSignedRange(offset)) { ma_li(SecondScratchReg, Imm32(offset)); as_addu(ScratchRegister, ScratchRegister, SecondScratchReg); offset = 0; @@ -2889,7 +2889,7 @@ MacroAssemblerMIPSCompat::storeValue(const Value &val, BaseIndex dest) // Make sure that ma_sw doesn't clobber ScratchRegister int32_t offset = dest.offset; - if (!Imm16::isInSignedRange(offset)) { + if (!Imm16::IsInSignedRange(offset)) { ma_li(SecondScratchReg, Imm32(offset)); as_addu(ScratchRegister, ScratchRegister, SecondScratchReg); offset = 0; diff --git a/js/src/jit/mips/MoveEmitter-mips.cpp b/js/src/jit/mips/MoveEmitter-mips.cpp index 12864b07a691..0ca88c0bd228 100644 --- a/js/src/jit/mips/MoveEmitter-mips.cpp +++ b/js/src/jit/mips/MoveEmitter-mips.cpp @@ -42,7 +42,7 @@ Address MoveEmitterMIPS::cycleSlot() const { int offset = masm.framePushed() - pushedAtCycle_; - MOZ_ASSERT(Imm16::isInSignedRange(offset)); + MOZ_ASSERT(Imm16::IsInSignedRange(offset)); return Address(StackPointer, offset); } diff --git a/js/src/jit/shared/Assembler-x86-shared.h b/js/src/jit/shared/Assembler-x86-shared.h index 7bbb3975bb66..4495446f8dcf 100644 --- a/js/src/jit/shared/Assembler-x86-shared.h +++ b/js/src/jit/shared/Assembler-x86-shared.h @@ -248,7 +248,7 @@ class AssemblerX86Shared : public AssemblerShared MOZ_ASSUME_UNREACHABLE("Unknown double condition"); } - static void staticAsserts() { + static void StaticAsserts() { // DoubleConditionBits should not interfere with x86 condition codes. JS_STATIC_ASSERT(!((Equal | NotEqual | Above | AboveOrEqual | Below | BelowOrEqual | Parity | NoParity) & DoubleConditionBits)); @@ -282,7 +282,7 @@ class AssemblerX86Shared : public AssemblerShared void executableCopy(void *buffer); void processCodeLabels(uint8_t *rawCode); - static int32_t extractCodeLabelOffset(uint8_t *code) { + static int32_t ExtractCodeLabelOffset(uint8_t *code) { return *(uintptr_t *)code; } void copyJumpRelocationTable(uint8_t *dest); @@ -1640,46 +1640,46 @@ class AssemblerX86Shared : public AssemblerShared // Patching. 
- static size_t patchWrite_NearCallSize() { + static size_t PatchWrite_NearCallSize() { return 5; } - static uintptr_t getPointer(uint8_t *instPtr) { + static uintptr_t GetPointer(uint8_t *instPtr) { uintptr_t *ptr = ((uintptr_t *) instPtr) - 1; return *ptr; } // Write a relative call at the start location |dataLabel|. // Note that this DOES NOT patch data that comes before |label|. - static void patchWrite_NearCall(CodeLocationLabel startLabel, CodeLocationLabel target) { + static void PatchWrite_NearCall(CodeLocationLabel startLabel, CodeLocationLabel target) { uint8_t *start = startLabel.raw(); *start = 0xE8; - ptrdiff_t offset = target - startLabel - patchWrite_NearCallSize(); + ptrdiff_t offset = target - startLabel - PatchWrite_NearCallSize(); JS_ASSERT(int32_t(offset) == offset); *((int32_t *) (start + 1)) = offset; } - static void patchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) { + static void PatchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) { *((int32_t *) dataLabel.raw() - 1) = toWrite.value; } - static void patchDataWithValueCheck(CodeLocationLabel data, PatchedImmPtr newData, + static void PatchDataWithValueCheck(CodeLocationLabel data, PatchedImmPtr newData, PatchedImmPtr expectedData) { // The pointer given is a pointer to *after* the data. uintptr_t *ptr = ((uintptr_t *) data.raw()) - 1; JS_ASSERT(*ptr == (uintptr_t)expectedData.value); *ptr = (uintptr_t)newData.value; } - static void patchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData, ImmPtr expectedData) { - patchDataWithValueCheck(data, PatchedImmPtr(newData.value), PatchedImmPtr(expectedData.value)); + static void PatchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData, ImmPtr expectedData) { + PatchDataWithValueCheck(data, PatchedImmPtr(newData.value), PatchedImmPtr(expectedData.value)); } - static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) { + static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) { MOZ_ASSUME_UNREACHABLE("Unused."); } - static uint32_t nopSize() { + static uint32_t NopSize() { return 1; } - static uint8_t *nextInstruction(uint8_t *cur, uint32_t *count) { + static uint8_t *NextInstruction(uint8_t *cur, uint32_t *count) { MOZ_ASSUME_UNREACHABLE("nextInstruction NYI on x86"); } diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp index b2cbb39aefdb..a7c29b660e03 100644 --- a/js/src/jit/shared/CodeGenerator-shared.cpp +++ b/js/src/jit/shared/CodeGenerator-shared.cpp @@ -411,13 +411,13 @@ CodeGeneratorShared::ensureOsiSpace() // // At points where we want to ensure that invalidation won't corrupt an // important instruction, we make sure to pad with nops. 
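Aside: PatchWrite_NearCall above writes an x86 E8 rel32 call — one opcode byte plus a 4-byte displacement measured from the end of the 5-byte instruction, which is exactly what PatchWrite_NearCallSize() returns. A freestanding sketch of the same arithmetic, with raw pointers standing in for CodeLocationLabel:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static void writeNearCall(uint8_t *start, const uint8_t *target) {
        const ptrdiff_t kNearCallSize = 5;             // 0xE8 + imm32
        start[0] = 0xE8;
        ptrdiff_t offset = target - (start + kNearCallSize);
        assert(offset == ptrdiff_t(int32_t(offset)));  // must fit in rel32
        int32_t rel = int32_t(offset);
        memcpy(start + 1, &rel, sizeof(rel));          // unaligned-safe store
    }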
- if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::patchWrite_NearCallSize()) { - int32_t paddingSize = Assembler::patchWrite_NearCallSize(); + if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::PatchWrite_NearCallSize()) { + int32_t paddingSize = Assembler::PatchWrite_NearCallSize(); paddingSize -= masm.currentOffset() - lastOsiPointOffset_; for (int32_t i = 0; i < paddingSize; ++i) masm.nop(); } - JS_ASSERT(masm.currentOffset() - lastOsiPointOffset_ >= Assembler::patchWrite_NearCallSize()); + JS_ASSERT(masm.currentOffset() - lastOsiPointOffset_ >= Assembler::PatchWrite_NearCallSize()); lastOsiPointOffset_ = masm.currentOffset(); } diff --git a/js/src/jit/shared/CodeGenerator-x86-shared.cpp b/js/src/jit/shared/CodeGenerator-x86-shared.cpp index 2f42df4a8e13..019f8b9ede60 100644 --- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp +++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp @@ -2006,7 +2006,7 @@ CodeGeneratorX86Shared::generateInvalidateEpilogue() // Ensure that there is enough space in the buffer for the OsiPoint // patching to occur. Otherwise, we could overwrite the invalidation // epilogue. - for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize()) + for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize()) masm.nop(); masm.bind(&invalidate_); diff --git a/js/src/jit/shared/IonAssemblerBuffer.h b/js/src/jit/shared/IonAssemblerBuffer.h index a47b47b58fbc..e2b9616ab40f 100644 --- a/js/src/jit/shared/IonAssemblerBuffer.h +++ b/js/src/jit/shared/IonAssemblerBuffer.h @@ -13,9 +13,11 @@ namespace js { namespace jit { -// This should theoretically reside inside of AssemblerBuffer, but that won't be nice -// AssemblerBuffer is templated, BufferOffset would be indirectly. -// A BufferOffset is the offset into a buffer, expressed in bytes of instructions. +// This should theoretically reside inside of AssemblerBuffer, but that won't be +// nice: AssemblerBuffer is templated, so BufferOffset would be indirectly +// templated as well. +// +// A BufferOffset is the offset into a buffer, expressed in bytes of +// instructions. class BufferOffset { @@ -27,10 +29,10 @@ class BufferOffset int getOffset() const { return offset; } // A BOffImm is a Branch Offset Immediate. It is an architecture-specific - // structure that holds the immediate for a pc relative branch. - // diffB takes the label for the destination of the branch, and encodes - // the immediate for the branch. This will need to be fixed up later, since - // A pool may be inserted between the branch and its destination + // structure that holds the immediate for a pc relative branch. diffB takes + // the label for the destination of the branch, and encodes the immediate + // for the branch. This will need to be fixed up later, since a pool may be + // inserted between the branch and its destination. template <class BOffImm> BOffImm diffB(BufferOffset other) const { return BOffImm(offset - other.offset); } @@ -84,7 +86,8 @@ template <int SliceSize, class Inst> struct AssemblerBuffer { public: - AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {} + AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false), + m_bail(false), bufferSize(0), LifoAlloc_(8192) {} protected: typedef BufferSlice<SliceSize> Slice; typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_; @@ -93,12 +96,12 @@ struct AssemblerBuffer public: bool m_oom; bool m_bail; - // How much data has been added to the buffer thusfar. + // How much data has been added to the buffer thus far.
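Aside: ensureOsiSpace above keeps consecutive OSI points at least PatchWrite_NearCallSize() bytes apart, so patching a near call over one cannot clobber the next; generateInvalidateEpilogue then nop-pads a pointer-sized region for the same reason. The padding arithmetic, isolated into a sketch:

    #include <cstdint>

    // Returns how many 1-byte nops must be emitted so that at least
    // nearCallSize bytes separate the last OSI point from the current
    // offset (NopSize() is 1 on x86, so one nop per byte).
    static uint32_t osiPaddingNeeded(uint32_t current, uint32_t lastOsi,
                                     uint32_t nearCallSize) {
        uint32_t distance = current - lastOsi;
        return distance < nearCallSize ? nearCallSize - distance : 0;
    }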
uint32_t bufferSize; uint32_t lastInstSize; bool isAligned(int alignment) const { - // make sure the requested alignment is a power of two. - JS_ASSERT((alignment & (alignment-1)) == 0); + // Make sure the requested alignment is a power of two. + JS_ASSERT(IsPowerOfTwo(alignment)); return !(size() & (alignment - 1)); } virtual Slice *newSlice(LifoAlloc &a) { @@ -111,7 +114,7 @@ struct AssemblerBuffer return tmp; } bool ensureSpace(int size) { - if (tail != nullptr && tail->size()+size <= SliceSize) + if (tail != nullptr && tail->size() + size <= SliceSize) return true; Slice *tmp = newSlice(LifoAlloc_); if (tmp == nullptr) @@ -170,22 +173,22 @@ struct AssemblerBuffer void fail_bail() { m_bail = true; } - // finger for speeding up accesses + // Finger for speeding up accesses. Slice *finger; unsigned int finger_offset; Inst *getInst(BufferOffset off) { int local_off = off.getOffset(); - // don't update the structure's finger in place, so there is the option + // Don't update the structure's finger in place, so there is the option // to not update it. Slice *cur = nullptr; int cur_off; - // get the offset that we'd be dealing with by walking through backwards + // Get the offset that we'd be dealing with by walking through + // backwards. int end_off = bufferSize - local_off; // If end_off is negative, then it is in the last chunk, and there is no // real work to be done. - if (end_off <= 0) { + if (end_off <= 0) return (Inst*)&tail->instructions[-end_off]; - } bool used_finger = false; int finger_off = abs((int)(local_off - finger_offset)); if (finger_off < Min(local_off, end_off)) { @@ -194,11 +197,11 @@ struct AssemblerBuffer cur_off = finger_offset; used_finger = true; } else if (local_off < end_off) { - // it is closest to the start + // It is closest to the start. cur = head; cur_off = 0; } else { - // it is closest to the end + // It is closest to the end. cur = tail; cur_off = bufferSize; } @@ -228,7 +231,8 @@ struct AssemblerBuffer finger = cur; finger_offset = cur_off; } - // the offset within this node should not be larger than the node itself. + // The offset within this node should not be larger than the node + // itself. JS_ASSERT(local_off < (int)cur->size()); return (Inst*)&cur->instructions[local_off]; } @@ -272,7 +276,7 @@ struct AssemblerBuffer AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_ *buff) : bo(off), m_buffer(buff) {} Inst *next() { Inst *i = m_buffer->getInst(bo); - bo = BufferOffset(bo.getOffset()+i->size()); + bo = BufferOffset(bo.getOffset() + i->size()); return cur(); }; Inst *cur() { diff --git a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h index ebd6d4cf0253..de99f34fdd8c 100644 --- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h +++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h @@ -30,21 +30,22 @@ struct Pool public: const bool isBackref; const bool canDedup; - // "other" is the backwards half of this pool, it is held in another pool structure + // "other" is the backwards half of this pool, it is held in another pool structure. Pool *other; uint8_t *poolData; uint32_t numEntries; uint32_t buffSize; LoadOffsets loadOffsets; - // When filling pools where the the size of an immediate is larger - // than the size of an instruction, we find we're in a case where the distance between the - // next instruction and the next pool slot is increasing! 
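Aside: getInst above avoids an O(n) walk from the head on every lookup by also considering the tail and a cached "finger" slice, and starting from whichever anchor is closest to the requested offset. The selection logic in isolation — the enum and helper are illustrative, not the real structure:

    #include <algorithm>
    #include <cstdlib>

    enum class Anchor { Head, Tail, Finger };

    static Anchor nearestAnchor(int off, int bufferSize, int fingerOff) {
        int fromHead = off;               // walk forward from the head
        int fromTail = bufferSize - off;  // walk backward from the tail
        int fromFinger = std::abs(off - fingerOff);
        if (fromFinger < std::min(fromHead, fromTail))
            return Anchor::Finger;
        return fromHead < fromTail ? Anchor::Head : Anchor::Tail;
    }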
+ // When filling pools where the size of an immediate is larger than the + // size of an instruction, we find we're in a case where the distance + // between the next instruction and the next pool slot is increasing! // Moreover, If we want to do fancy things like deduplicate pool entries at // dump time, we may not know the location in a pool (and thus the limiting load) // until very late. - // Lastly, it may be beneficial to interleave the pools. I have absolutely no idea - // how that will work, but my suspicions are that it will be difficult. + // Lastly, it may be beneficial to interleave the pools. I have absolutely + // no idea how that will work, but my suspicions are that it will be + // difficult. BufferOffset limitingUser; int limitingUsee; @@ -63,12 +64,12 @@ struct Pool alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage) { } - // Sometimes, when we are adding large values to a pool, the limiting use may change. - // Handle this case. nextInst is the address of the + // Sometimes, when we are adding large values to a pool, the limiting use + // may change. Handle this case. The nextInst is the address of the next + // instruction to be added. void updateLimiter(BufferOffset nextInst) { int oldRange, newRange; if (isBackref) { - // common expressions that are not subtracted: the location of the pool, ... + // Common expressions that are not subtracted: the location of the pool, ... oldRange = limitingUser.getOffset() - ((numEntries - limitingUsee) * immSize); newRange = nextInst.getOffset(); } else { @@ -81,10 +82,9 @@ struct Pool limitingUsee = numEntries; } } - // checkFull is called before any modifications have been made. - // It is "if we were to add this instruction and pool entry, - // would we be in an invalid state?". If it is true, then it is in fact - // time for a "pool dump". + // checkFull is called before any modifications have been made. It is "if + // we were to add this instruction and pool entry, would we be in an invalid + // state?". If it is true, then it is in fact time for a "pool dump". // poolOffset is the distance from the end of the current section to the end of the pool. // For the last section of the pool, this will be the size of the footer @@ -98,23 +98,22 @@ struct Pool bool checkFullBackref(int poolOffset, int codeOffset) { if (!limitingUser.assigned()) return false; - signed int distance = - limitingUser.getOffset() + bias - - codeOffset + poolOffset + + signed int distance = limitingUser.getOffset() + bias - codeOffset + poolOffset + (numEntries - limitingUsee + 1) * immSize; if (distance >= maxOffset) return true; return false; } - // checkFull answers the question "If a pool were placed at poolOffset, would - // any reference into the pool be out of range?". It is meant to be used as instructions - // and elements are inserted, to determine if a saved perforation point needs to be used. + // checkFull answers the question "If a pool were placed at poolOffset, + // would any reference into the pool be out of range?". It is meant to be + // used as instructions and elements are inserted, to determine if a saved + // perforation point needs to be used. bool checkFull(int poolOffset) { - // Inserting an instruction into the stream can - // push any of the pools out of range. - // Similarly, inserting into a pool can push the pool entry out of range + // Inserting an instruction into the stream can push any of the pools + // out of range. Similarly, inserting into a pool can push the pool + // entry out of range.
JS_ASSERT(!isBackref); // Not full if there aren't any uses. if (!limitingUser.assigned()) { @@ -128,7 +127,8 @@ struct Pool return false; } - // By the time this function is called, we'd damn well better know that this is going to succeed. + // By the time this function is called, we'd damn well better know that this + // is going to succeed. uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) { if (numEntries == buffSize) { buffSize <<= 1; @@ -166,9 +166,9 @@ struct Pool return true; } - // WARNING: This will not always align values. It will only - // align to the requirement of the pool. If the pool is empty, - // there is nothing to be aligned, so it will not perform any alignment + // WARNING: This will not always align values. It will only align to the + // requirement of the pool. If the pool is empty, there is nothing to be + // aligned, so it will not perform any alignment. uint8_t* align(uint8_t *ptr) { return (uint8_t*)align((uint32_t)ptr); } @@ -316,13 +316,13 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer typedef BufferSliceTail<SliceSize, InstBaseSize> BufferSlice; typedef AssemblerBuffer<SliceSize, Inst> Parent; - // The size of a guard instruction + // The size of a guard instruction. const int guardSize; - // The size of the header that is put at the beginning of a full pool + // The size of the header that is put at the beginning of a full pool. const int headerSize; // The size of a footer that is put in a pool after it is full. const int footerSize; - // the number of sub-pools that we can allocate into. + // The number of sub-pools that we can allocate into. static const int numPoolKinds = 1 << poolKindBits; Pool *pools; @@ -330,34 +330,36 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer // The buffer should be aligned to this address. const int instBufferAlign; - // the number of times we've dumped the pool. + // The number of times we've dumped the pool. int numDumps; struct PoolInfo { - int offset; // the number of instructions before the start of the pool - int size; // the size of the pool, including padding - int finalPos; // the end of the buffer, in bytes from the beginning of the buffer + int offset; // The number of instructions before the start of the pool. + int size; // The size of the pool, including padding. + int finalPos; // The end of the buffer, in bytes from the beginning of the buffer. BufferSlice *slice; }; PoolInfo *poolInfo; - // we need to keep track of how large the pools are, so we can allocate - // enough space for them later. This should include any amount of padding + // We need to keep track of how large the pools are, so we can allocate + // enough space for them later. This should include any amount of padding // necessary to keep the pools aligned. int poolSize; - // The Assembler should set this to true if it does not want us to dump a pool here + // The Assembler should set this to true if it does not want us to dump a + // pool here. int canNotPlacePool; // Are we filling up the forwards or backwards pools? bool inBackref; - // Insert a number of NOP instructions between each requested instruction at all - // locations at which a pool can potentially spill. This is useful for checking - // that instruction locations are correctly referenced and/or followed. + // Insert a number of NOP instructions between each requested instruction at + // all locations at which a pool can potentially spill. This is useful for + // checking that instruction locations are correctly referenced and/or + // followed.
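Aside: checkFull/checkFullBackref above are reach checks — would the oldest outstanding pc-relative load still reach its entry if the pool were dumped at poolOffset? Reduced to the arithmetic, under assumed parameter names (bias and maxOffset are per-architecture encoding limits; an ARM ldr-literal, for instance, reaches only about 4 KB, which is what forces dumps in long functions):

    // True when dumping the pool at poolOffset would strand the limiting
    // (earliest) load beyond the encodable displacement.
    static bool poolWouldBeOutOfRange(int limitingUserOffset, int poolOffset,
                                      int bias, int maxOffset) {
        int distance = poolOffset + bias - limitingUserOffset;
        return distance >= maxOffset;
    }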
const uint32_t nopFillInst; const uint32_t nopFill; - // Inhibit the insertion of fill NOPs in the dynamic context in which they are - // being inserted. + // Inhibit the insertion of fill NOPs in the dynamic context in which they + // are being inserted. bool inhibitNops; - // Cache the last place we saw an opportunity to dump the pool + // Cache the last place we saw an opportunity to dump the pool. BufferOffset perforation; BufferSlice *perforatedNode; public: @@ -413,7 +415,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer void executableCopy(uint8_t *dest_) { if (this->oom()) return; - // TODO: only do this when the pool actually has a value in it + // TODO: only do this when the pool actually has a value in it. flushPool(); for (int idx = 0; idx < numPoolKinds; idx++) { JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0); @@ -429,7 +431,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer for (unsigned int idx = 0; idx < cur->size()/InstBaseSize; idx++, curInstOffset += InstBaseSize) { // Is the current instruction a branch? - if (cur->isBranch[idx >> 3] & (1<<(idx&7))) { + if (cur->isBranch[idx >> 3] & (1 << (idx & 7))) { // It's a branch. fix up the branchiness! patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset)); } @@ -437,11 +439,11 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer } dest+=cur->size()/InstBaseSize; if (cur->data != nullptr) { - // have the repatcher move on to the next pool + // Have the repatcher move on to the next pool. curIndex ++; - // loop over all of the pools, copying them into place. + // Loop over all of the pools, copying them into place. uint8_t *poolDest = (uint8_t*)dest; - Asm::writePoolHeader(poolDest, cur->data, cur->isNatural); + Asm::WritePoolHeader(poolDest, cur->data, cur->isNatural); poolDest += headerSize; for (int idx = 0; idx < numPoolKinds; idx++) { Pool *curPool = &cur->data[idx]; @@ -450,18 +452,20 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries); poolDest += curPool->immSize * curPool->numEntries; } - // now go over the whole list backwards, and copy in the reverse portions - for (int idx = numPoolKinds-1; idx >= 0; idx--) { + // Now go over the whole list backwards, and copy in the reverse + // portions. + for (int idx = numPoolKinds - 1; idx >= 0; idx--) { Pool *curPool = cur->data[idx].other; // align the pool. poolDest = curPool->align(poolDest); memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries); poolDest += curPool->immSize * curPool->numEntries; } - // write a footer in place - Asm::writePoolFooter(poolDest, cur->data, cur->isNatural); + // Write a footer in place. + Asm::WritePoolFooter(poolDest, cur->data, cur->isNatural); poolDest += footerSize; - // at this point, poolDest had better still be aligned to a chunk boundary. + // At this point, poolDest had better still be aligned to a + // chunk boundary. dest = (Chunk*) poolDest; } } @@ -472,8 +476,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer if (nopFill > 0 && !inhibitNops && !canNotPlacePool) { inhibitNops = true; - // Fill using a branch-nop rather than a NOP so this can - // be distinguished and skipped. + // Fill using a branch-nop rather than a NOP so this can be + // distinguished and skipped.
for (int i = 0; i < nopFill; i++) putInt(nopFillInst); @@ -497,26 +501,26 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer spewEntry(data, p->immSize); IonSpewFin(IonSpew_Pools); } - // insert the pool value + // Insert the pool value. if (inBackref) token = insertEntryBackwards(instSize, inst, p, data); else token = insertEntryForwards(instSize, inst, p, data); - // now to get an instruction to write + // Now to get an instruction to write. PoolEntry retPE; if (p != nullptr) { if (this->oom()) return BufferOffset(); int poolId = p - pools; IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size()); - Asm::insertTokenIntoTag(instSize, inst, token); + Asm::InsertTokenIntoTag(instSize, inst, token); JS_ASSERT(poolId < (1 << poolKindBits)); JS_ASSERT(poolId >= 0); - // Figure out the offset within like-kinded pool entries + // Figure out the offset within like-kinded pool entries. retPE = PoolEntry(entryCount[poolId], poolId); entryCount[poolId]++; } - // Now inst is a valid thing to insert into the instruction stream + // Now inst is a valid thing to insert into the instruction stream. if (pe != nullptr) *pe = retPE; if (markAsBranch) @@ -525,7 +529,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer } uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) { - // unlike the forward case, inserting an instruction without inserting + // Unlike the forward case, inserting an instruction without inserting // anything into a pool after a pool has been placed, we don't affect // anything relevant, so we can skip this check entirely! @@ -535,18 +539,17 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer // Instead, assume that we always add the maximum. int poolOffset = footerSize; Pool *cur, *tmp; - // NOTE: we want to process the pools from last to first. - // Since the last pool is pools[0].other, and the first pool - // is pools[numPoolKinds-1], we actually want to process this - // forwards. + // NOTE: we want to process the pools from last to first. Since the last + // pool is pools[0].other, and the first pool is pools[numPoolKinds-1], + // we actually want to process this forwards. for (cur = pools; cur < &pools[numPoolKinds]; cur++) { - // fetch the pool for the backwards half. + // Fetch the pool for the backwards half. tmp = cur->other; if (p == cur) tmp->updateLimiter(this->nextOffset()); if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) { - // uh-oh, the backwards pool is full. Time to finalize it, and + // Uh-oh, the backwards pool is full. Time to finalize it, and // switch to a new forward pool. if (p != nullptr) IonSpew(IonSpew_Pools, "[%d]Inserting pool entry caused a spill", id); @@ -558,8 +561,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer return uint32_t(-1); return this->insertEntryForwards(instSize, inst, p, data); } - // when moving back to front, calculating the alignment is hard, just be - // conservative with it. + // When moving back to front, calculating the alignment is hard, + // just be conservative with it. poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment(); if (p == tmp) { poolOffset += tmp->immSize; @@ -568,31 +571,34 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_); } - // Simultaneously insert an instSized instruction into the stream, - // and an entry into the pool. 
There are many things that can happen. + // Simultaneously insert an instSized instruction into the stream, and an + // entry into the pool. There are many things that can happen. // 1) the insertion goes as planned - // 2) inserting an instruction pushes a previous pool-reference out of range, forcing a dump - // 2a) there isn't a reasonable save point in the instruction stream. We need to save room for - // a guard instruction to branch over the pool. + // 2) inserting an instruction pushes a previous pool-reference out of + // range, forcing a dump + // 2a) there isn't a reasonable save point in the instruction stream. We + // need to save room for a guard instruction to branch over the pool. int insertEntryForwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) { - // Advance the "current offset" by an inst, so everyone knows what their offset should be. + // Advance the "current offset" by an inst, so everyone knows what their + // offset should be. uint32_t nextOffset = this->size() + instSize; uint32_t poolOffset = nextOffset; Pool *tmp; // If we need a guard instruction, reserve space for that. if (!perforatedNode) poolOffset += guardSize; - // Also, take into account the size of the header that will be placed *after* - // the guard instruction + // Also, take into account the size of the header that will be placed + // *after* the guard instruction. poolOffset += headerSize; // Perform the necessary range checks. for (tmp = pools; tmp < &pools[numPoolKinds]; tmp++) { - // The pool may wish for a particular alignment, Let's give it one. + // The pool may wish for a particular alignment. Let's give it one. JS_ASSERT((tmp->getAlignment() & (tmp->getAlignment() - 1)) == 0); - // The pool only needs said alignment *if* there are any entries in the pool - // WARNING: the pool needs said alignment if there are going to be entries in - // the pool after this entry has been inserted + // The pool only needs said alignment *if* there are any entries in + // the pool. WARNING: the pool needs said alignment if there are + // going to be entries in the pool after this entry has been + // inserted. if (p == tmp) poolOffset = tmp->forceAlign(poolOffset); else @@ -604,7 +610,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer p->updateLimiter(BufferOffset(nextOffset)); } if (tmp->checkFull(poolOffset)) { - // uh-oh. DUMP DUMP DUMP + // uh-oh. DUMP DUMP DUMP. if (p != nullptr) IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id); else @@ -613,7 +619,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer this->dumpPool(); return this->insertEntryBackwards(instSize, inst, p, data); } - // include the size of this pool in the running total + // Include the size of this pool in the running total. if (p == tmp) { nextOffset += tmp->immSize; } @@ -628,16 +634,16 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr, nullptr, markAsBranch); } - // Mark the current section as an area where we can - // later go to dump a pool + // Mark the current section as an area where we can later go to dump a pool. void perforate() { - // If we're filling the backrefrences, we don't want to start looking for a new dumpsite. + // If we're filling the backreferences, we don't want to start looking + // for a new dumpsite.
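Aside: insertEntryForwards above simulates the eventual pool placement — it advances a poolOffset cursor across every pool kind, aligning as it goes, and the moment any kind reports full it dumps and retries backwards. The cursor update, schematically (alignment must be a power of two, as the JS_ASSERT above enforces):

    // Round off up to the next multiple of a power-of-two alignment.
    static int alignUp(int off, int alignment) {
        return (off + alignment - 1) & ~(alignment - 1);
    }

    // Per pool kind p, in order:
    //     poolOffset = alignUp(poolOffset, p.alignment);
    //     if p.checkFull(poolOffset): dumpPool(); retry backwards;
    //     poolOffset += p.numEntries * p.immSize;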
if (inBackref) return; if (canNotPlacePool) return; // If there is nothing in the pool, then it is strictly disadvantageous - // to attempt to place a pool here + // to attempt to place a pool here. bool empty = true; for (int i = 0; i < numPoolKinds; i++) { if (pools[i].numEntries != 0) { @@ -653,13 +659,13 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer IonSpew(IonSpew_Pools, "[%d] Adding a perforation at offset %d", id, perforation.getOffset()); } - // After a pool is finished, no more elements may be added to it. During this phase, we - // will know the exact offsets to the pool entries, and those values should be written into - // the given instructions. + // After a pool is finished, no more elements may be added to it. During + // this phase, we will know the exact offsets to the pool entries, and those + // values should be written into the given instructions. PoolInfo getPoolData() const { int prevOffset = getInfo(numDumps-1).offset; int prevEnd = getInfo(numDumps-1).finalPos; - // calculate the offset of the start of this pool; + // Calculate the offset of the start of this pool. int perfOffset = perforation.assigned() ? perforation.getOffset() : this->nextOffset().getOffset() + this->guardSize; @@ -682,7 +688,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer finOffset=pools[poolIdx].align(finOffset); finOffset+=pools[poolIdx].numEntries * pools[poolIdx].immSize; } - // And compute the necessary adjustments for the second half of the pool. + // And compute the necessary adjustments for the second half of the + // pool. for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) { finOffset=pools[poolIdx].other->align(finOffset); finOffset+=pools[poolIdx].other->numEntries * pools[poolIdx].other->immSize; @@ -698,26 +705,28 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer return ret; } void finishPool() { - // This function should only be called while the backwards half of the pool - // is being filled in. The backwards half of the pool is always in a state - // where it is sane. Everything that needs to be done here is for "sanity's sake". - // The per-buffer pools need to be reset, and we need to record the size of the pool. + // This function should only be called while the backwards half of the + // pool is being filled in. The backwards half of the pool is always in + // a state where it is sane. Everything that needs to be done here is + // for "sanity's sake". The per-buffer pools need to be reset, and we + // need to record the size of the pool. IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps); JS_ASSERT(inBackref); PoolInfo newPoolInfo = getPoolData(); if (newPoolInfo.size == 0) { - // The code below also creates a new pool, but that is not necessary, since - // the pools have not been modified at all. + // The code below also creates a new pool, but that is not + // necessary, since the pools have not been modified at all. new (&perforation) BufferOffset(); perforatedNode = nullptr; inBackref = false; IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id); - // Bail out early, since we don't want to even pretend these pools exist. + // Bail out early, since we don't want to even pretend these pools + // exist. return; } JS_ASSERT(perforatedNode != nullptr); - if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) { + if (numDumps >= (1 << logBasePoolInfo) && (numDumps & (numDumps - 1)) == 0) { + // Need to resize.
PoolInfo *tmp = static_cast<PoolInfo *>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2)); if (tmp == nullptr) { this->fail_oom(); @@ -728,71 +737,78 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer } - // In order to figure out how to fix up the loads for the second half of the pool - // we need to find where the bits of the pool that have been implemented end. + // In order to figure out how to fix up the loads for the second half of + // the pool we need to find where the bits of the pool that have been + // implemented end. int poolOffset = perforation.getOffset(); int magicAlign = getInfo(numDumps-1).finalPos - getInfo(numDumps-1).offset; poolOffset += magicAlign; poolOffset += headerSize; for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { - poolOffset=pools[poolIdx].align(poolOffset); - poolOffset+=pools[poolIdx].numEntries * pools[poolIdx].immSize; + poolOffset = pools[poolIdx].align(poolOffset); + poolOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize; } mozilla::Array<LoadOffsets, 1 << poolKindBits> outcasts; mozilla::Array<uint8_t *, 1 << poolKindBits> outcastEntries; - // All of the pool loads referred to by this code are going to - // need fixing up here. + // All of the pool loads referred to by this code are going to need + // fixing up here. int skippedBytes = 0; for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) { Pool *p = pools[poolIdx].other; JS_ASSERT(p != nullptr); unsigned int idx = p->numEntries-1; - // Allocate space for tracking information that needs to be propagated to the next pool - // as well as space for quickly updating the pool entries in the current pool to remove - // the entries that don't actually fit. I probably should change this over to a vector + // Allocate space for tracking information that needs to be + // propagated to the next pool as well as space for quickly updating + // the pool entries in the current pool to remove the entries that + // don't actually fit. I probably should change this over to a + // vector. outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()]; bool *preservedEntries = new bool[p->numEntries]; // Hacks on top of Hacks! - // the patching code takes in the address of the instruction to be patched, - // and the "address" of the element in the pool that we want to load. - // However, since the code isn't actually in an array, we need to lie about - // the address that the pool is in. Furthermore, since the offsets are - // technically from the beginning of the FORWARD reference section, we have - // to lie to ourselves about where this pool starts in order to make sure - // the distance into the pool is interpreted correctly. - // There is a more elegant way to fix this that will need to be implemented - // eventually. We will want to provide the fixup function with a method to - // convert from a 'token' into a pool offset. + // The patching code takes in the address of the instruction to be + // patched, and the "address" of the element in the pool that we + // want to load. However, since the code isn't actually in an array, + // we need to lie about the address that the pool is + // in. Furthermore, since the offsets are technically from the + // beginning of the FORWARD reference section, we have to lie to + // ourselves about where this pool starts in order to make sure the + // distance into the pool is interpreted correctly. There is a more + // elegant way to fix this that will need to be implemented + // eventually. We will want to provide the fixup function with a + // method to convert from a 'token' into a pool offset.
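Aside: the fixup loop that follows feeds PatchConstantPoolLoad a synthetic address — the pool bytes do not yet sit at their final location, so the patcher receives the instruction's own address plus a computed delta and only ever consumes the distance. A sketch of what such a patcher might do with that distance, assuming an ARM-style 12-bit literal-load field and ignoring pipeline bias; the encoding details here are illustrative, not the real backend:

    #include <cstdint>

    // Re-encode a literal load so it reaches entryAddr; returns false when
    // the displacement no longer fits, mirroring the bail-out path above.
    static bool patchLiteralLoad(uint32_t *inst, const uint8_t *entryAddr) {
        intptr_t delta = entryAddr - reinterpret_cast<const uint8_t *>(inst);
        intptr_t mag = delta < 0 ? -delta : delta;
        if (mag > 4095)
            return false;              // out of range: caller must bail
        uint32_t bits = *inst & ~((1u << 23) | 0xFFFu);
        if (delta >= 0)
            bits |= 1u << 23;          // U bit: offset is added, not subtracted
        *inst = bits | uint32_t(mag);
        return true;
    }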
poolOffset = p->align(poolOffset); int numSkips = 0; int fakePoolOffset = poolOffset - pools[poolIdx].numEntries * pools[poolIdx].immSize; - for (BufferOffset *iter = p->loadOffsets.end()-1; - iter != p->loadOffsets.begin()-1; --iter, --idx) + for (BufferOffset *iter = p->loadOffsets.end() - 1; + iter != p->loadOffsets.begin() - 1; --iter, --idx) { IonSpew(IonSpew_Pools, "[%d] Linking entry %d in pool %d", id, idx+ pools[poolIdx].numEntries, poolIdx); JS_ASSERT(iter->getOffset() >= perforation.getOffset()); - // Everything here is known, we can safely do the necessary substitutions - Inst * inst = this->getInst(*iter); - // Manually compute the offset, including a possible bias. - // Also take into account the whole size of the pool that is being placed. + // Everything here is known, we can safely do the necessary + // substitutions. + Inst *inst = this->getInst(*iter); + // Manually compute the offset, including a possible bias. Also + // take into account the whole size of the pool that is being + // placed. int codeOffset = fakePoolOffset - iter->getOffset() - newPoolInfo.size + numSkips * p->immSize - skippedBytes; - // That is, patchConstantPoolLoad wants to be handed the address of the - // pool entry that is being loaded. We need to do a non-trivial amount - // of math here, since the pool that we've made does not actually reside there - // in memory. + // That is, PatchConstantPoolLoad wants to be handed the address + // of the pool entry that is being loaded. We need to do a + // non-trivial amount of math here, since the pool that we've + // made does not actually reside there in memory. IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign); - if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) { - // NOTE: if removing this entry happens to change the alignment of the next - // block, chances are you will have a bad time. - // ADDENDUM: this CANNOT happen on ARM, because the only elements that - // fall into this case are doubles loaded via vfp, but they will also be - // the last pool, which means it cannot affect the alignment of any other - // Sub Pools. + if (!Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) { + // NOTE: if removing this entry happens to change the + // alignment of the next block, chances are you will have a + // bad time. + // ADDENDUM: this CANNOT happen on ARM, because the only + // elements that fall into this case are doubles loaded via + // vfp, but they will also be the last pool, which means it + // cannot affect the alignment of any other Sub Pools. IonSpew(IonSpew_Pools, "[%d]***Offset was still out of range!***", id, codeOffset - magicAlign); IonSpew(IonSpew_Pools, "[%d] Too complicated; bailingp", id); this->fail_bail(); - // only free up to the current offset + // Only free up to the current offset. for (int pi = poolIdx; pi < numPoolKinds; pi++) delete[] outcastEntries[pi]; delete[] preservedEntries; @@ -801,7 +817,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer preservedEntries[idx] = true; } } - // remove the elements of the pool that should not be there (YAY, MEMCPY) + // Remove the elements of the pool that should not be there (YAY, + // MEMCPY). unsigned int idxDest = 0; // If no elements were skipped, no expensive copy is necessary. 
if (numSkips != 0) { @@ -821,7 +838,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer delete[] preservedEntries; preservedEntries = nullptr; } - // bind the current pool to the perforation point. + // Bind the current pool to the perforation point. Pool **tmp = &perforatedNode->data; *tmp = static_cast<Pool *>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds)); if (tmp == nullptr) { this->fail_oom(); for (int pi = 0; pi < numPoolKinds; pi++) delete[] outcastEntries[pi]; return; } - // The above operations may have changed the size of pools! - // recalibrate the size of the pool. + // The above operations may have changed the size of pools! Recalibrate + // the size of the pool. newPoolInfo = getPoolData(); poolInfo[numDumps] = newPoolInfo; poolSize += poolInfo[numDumps].size; @@ -839,7 +856,7 @@ memcpy(*tmp, pools, sizeof(Pool) * numPoolKinds); - // reset everything to the state that it was in when we started + // Reset everything to the state that it was in when we started. for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { if (!pools[poolIdx].reset(this->LifoAlloc_)) { this->fail_oom(); @@ -856,35 +873,38 @@ // has been allocated, it is time to populate the new forward pool with // any entries that couldn't fit in the backwards pool. for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { - // Technically, the innermost pool will never have this issue, but it is easier - // to just handle this case. - // Since the pool entry was filled back-to-front, and in the next buffer, the elements - // should be front-to-back, this insertion also needs to proceed backwards + // Technically, the innermost pool will never have this issue, but + // it is easier to just handle this case. + // Since the pool entry was filled back-to-front, and in the next + // buffer, the elements should be front-to-back, this insertion also + // needs to proceed backwards. int idx = outcasts[poolIdx].length(); - for (BufferOffset *iter = outcasts[poolIdx].end()-1; - iter != outcasts[poolIdx].begin()-1; + for (BufferOffset *iter = outcasts[poolIdx].end() - 1; + iter != outcasts[poolIdx].begin() - 1; --iter, --idx) { pools[poolIdx].updateLimiter(*iter); Inst *inst = this->getInst(*iter); - Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end()-1-iter); - pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx*pools[poolIdx].immSize], *iter, this->LifoAlloc_); + Asm::InsertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end() - 1 - iter); + pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx * pools[poolIdx].immSize], *iter, this->LifoAlloc_); } delete[] outcastEntries[poolIdx]; } - // this (*2) is not technically kosher, but I want to get this bug fixed. - // It should actually be guardSize + the size of the instruction that we're attempting - // to insert. Unfortunately that vaue is never passed in. On ARM, these instructions - // are always 4 bytes, so guardSize is legit to use. + // This (*2) is not technically kosher, but I want to get this bug + // fixed. It should actually be guardSize + the size of the instruction + // that we're attempting to insert. Unfortunately that value is never + // passed in. On ARM, these instructions are always 4 bytes, so + // guardSize is legit to use.
poolOffset = this->size() + guardSize * 2; poolOffset += headerSize; for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { - // There can still be an awkward situation where the element that triggered the - // initial dump didn't fit into the pool backwards, and now, still does not fit into - // this pool. Now it is necessary to go and dump this pool (note: this is almost - // certainly being called from dumpPool()). + // There can still be an awkward situation where the element that + // triggered the initial dump didn't fit into the pool backwards, + // and now, still does not fit into this pool. Now it is necessary + // to go and dump this pool (note: this is almost certainly being + // called from dumpPool()). poolOffset = pools[poolIdx].align(poolOffset); if (pools[poolIdx].checkFull(poolOffset)) { - // ONCE AGAIN, UH-OH, TIME TO BAIL + // ONCE AGAIN, UH-OH, TIME TO BAIL. dumpPool(); break; } @@ -907,13 +927,14 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer if (!perforation.assigned()) { JS_ASSERT(!canNotPlacePool); IonSpew(IonSpew_Pools, "[%d] No Perforation point selected, generating a new one", id); - // There isn't a perforation here, we need to dump the pool with a guard. + // There isn't a perforation here, we need to dump the pool with a + // guard. BufferOffset branch = this->nextOffset(); bool shouldMarkAsBranch = this->isNextBranch(); this->markNextAsBranch(); this->putBlob(guardSize, nullptr); BufferOffset afterPool = this->nextOffset(); - Asm::writePoolGuard(branch, this->getInst(branch), afterPool); + Asm::WritePoolGuard(branch, this->getInst(branch), afterPool); markGuard(); perforatedNode->isNatural = false; if (shouldMarkAsBranch) @@ -929,46 +950,50 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) { mozilla::DebugOnly beforePool = true; Pool *p = &pools[poolIdx]; - // Any entries that happened to be after the place we put our pool will need to be - // switched from the forward-referenced pool to the backward-refrenced pool. + // Any entries that happened to be after the place we put our pool + // will need to be switched from the forward-referenced pool to the + // backward-refrenced pool. int idx = 0; for (BufferOffset *iter = p->loadOffsets.begin(); iter != p->loadOffsets.end(); ++iter, ++idx) { if (iter->getOffset() >= perforation.getOffset()) { IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx); - // insert this into the rear part of the pool. + // Insert this into the rear part of the pool. int offset = idx * p->immSize; p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_); - // update the limiting entry for this pool. + // Update the limiting entry for this pool. p->other->updateLimiter(*iter); - // Update the current pool to report fewer entries. They are now in the - // backwards section. + // Update the current pool to report fewer entries. They + // are now in the backwards section. p->numEntries--; beforePool = false; } else { JS_ASSERT(beforePool); - // align the pool offset to the alignment of this pool - // it already only aligns when the pool has data in it, but we want to not - // align when all entries will end up in the backwards half of the pool + // Align the pool offset to the alignment of this pool it + // already only aligns when the pool has data in it, but we + // want to not align when all entries will end up in the + // backwards half of the pool. 
poolOffset = p->align(poolOffset); IonSpew(IonSpew_Pools, "[%d] Entry %d in pool %d is before the pool.", id, idx, poolIdx); - // Everything here is known, we can safely do the necessary substitutions - Inst * inst = this->getInst(*iter); - // We need to manually compute the offset, including a possible bias. + // Everything here is known, we can safely do the necessary + // substitutions. + Inst *inst = this->getInst(*iter); + // We need to manually compute the offset, including a + // possible bias. int codeOffset = poolOffset - iter->getOffset(); - // That is, patchConstantPoolLoad wants to be handed the address of the - // pool entry that is being loaded. We need to do a non-trivial amount - // of math here, since the pool that we've made does not actually reside there - // in memory. + // That is, PatchConstantPoolLoad wants to be handed the + // address of the pool entry that is being loaded. We need + // to do a non-trivial amount of math here, since the pool + // that we've made does not actually reside there in memory. IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign); - Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign); + Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign); } } // Some number of entries have been positively identified as being // in this section of the pool. Before processing the next pool, - // update the offset from the beginning of the buffer + // update the offset from the beginning of the buffer. poolOffset += p->numEntries * p->immSize; } poolOffset = footerSize; @@ -976,8 +1001,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) { Pool *tmp = pools[poolIdx].other; if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) { - // GNAAAH. While we rotated elements into the back half, one of them filled up - // Now, dumping the back half is necessary... + // GNAAAH. While we rotated elements into the back half, one of + // them filled up. Now, dumping the back half is necessary... finishPool(); break; } @@ -1014,10 +1039,10 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer } // Can't assert anything here, since the first pool may be after the target. } - Asm::retargetNearBranch(i, offset, false); + Asm::RetargetNearBranch(i, offset, false); } - // Mark the next instruction as a valid guard. This means we can place a pool here. + // Mark the next instruction as a valid guard. This means we can place a pool here. void markGuard() { // If we are in a no pool zone then there is no point in dogearing // this branch as a place to go back to @@ -1032,21 +1057,22 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer void enterNoPool() { insertNopFill(); if (!canNotPlacePool && !perforation.assigned()) { - // Embarassing mode: The Assembler requests the start of a no pool section - // and there have been no valid places that a pool could be dumped thusfar. - // If a pool were to fill up before this no-pool section ends, we need to go back - // in the stream and enter a pool guard after the fact. This is feasable, but - // for now, it is easier to just allocate a junk instruction, default it to a nop, and - // finally, if the pool *is* needed, patch the nop to apool guard. - // What the assembler requests: + // Embarassing mode: The Assembler requests the start of a no pool + // section and there have been no valid places that a pool could be + // dumped thusfar. 
If a pool were to fill up before this no-pool + // section ends, we need to go back in the stream and enter a pool + // guard after the fact. This is feasible, but for now, it is easier + // to just allocate a junk instruction, default it to a nop, and + // finally, if the pool *is* needed, patch the nop to a pool + // guard. What the assembler requests: // #request no-pool zone // push pc // blx r12 // #end no-pool zone - // however, if we would need to insert a pool, and there is no perforation point... - // so, actual generated code: + // however, if we would need to insert a pool, and there is no + // perforation point... so, actual generated code: // b next; <= perforation point // next: @@ -1058,7 +1084,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer this->markNextAsBranch(); this->putBlob(guardSize, nullptr); BufferOffset afterPool = this->nextOffset(); - Asm::writePoolGuard(branch, this->getInst(branch), afterPool); + Asm::WritePoolGuard(branch, this->getInst(branch), afterPool); markGuard(); if (perforatedNode != nullptr) perforatedNode->isNatural = false; @@ -1075,9 +1101,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer return &pools[idx]; } void markNextAsBranch() { - // If the previous thing inserted was the last instruction of - // the node, then whoops, we want to mark the first instruction of - // the next node. + // If the previous thing inserted was the last instruction of the node, + // then whoops, we want to mark the first instruction of the next node. this->ensureSpace(InstBaseSize); JS_ASSERT(*this->getTail() != nullptr); (*this->getTail())->markNextAsBranch(); @@ -1100,8 +1125,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer int cur = 0; while(cur < numDumps && poolInfo[cur].offset <= offset) cur++; - // poolInfo[curDumpsite] is now larger than the offset - // either this is the first one, or the previous is the last one we care about + // poolInfo[curDumpsite] is now larger than the offset; either this is + // the first one, or the previous is the last one we care about. if (cur == 0) return 0; return poolInfo[cur-1].finalPos - poolInfo[cur-1].offset; @@ -1126,9 +1151,9 @@ } if (poolNum != nullptr) *poolNum = idx; - // If this offset is contained in any finished pool, forward or backwards, p now - // points to that pool, if it is not in any pool (should be in the currently building pool) - // then p is nullptr. + // If this offset is contained in any finished pool, forward or + // backwards, p now points to that pool; if it is not in any pool + // (should be in the currently building pool) then p is nullptr. if (p == nullptr) { p = &pools[poolKind]; if (offset >= p->getPoolSize()) { @@ -1163,16 +1188,15 @@ Pool *poolGroup = pi->slice->data; uint32_t start = pi->finalPos - pi->size + headerSize; /// The order of the pools is: - // A B C C_Rev B_Rev A_Rev, so in the initial pass, - // go through the pools forwards, and in the second pass - // go through them in reverse order. + // A B C C_Rev B_Rev A_Rev, so in the initial pass, go through the pools + // forwards, and in the second pass go through them in reverse order.
for (int idx = 0; idx < numPoolKinds; idx++) { if (&poolGroup[idx] == realPool) { return start + offset; } start = poolGroup[idx].addPoolSize(start); } - for (int idx = numPoolKinds-1; idx >= 0; idx--) { + for (int idx = numPoolKinds - 1; idx >= 0; idx--) { if (poolGroup[idx].other == realPool) { return start + offset; }