Bug 784739 - Switch from NULL to nullptr in js/src/jit/ subdirectories; r=ehsan

--HG--
extra : rebase_source : 0b327393ba130a0c8c60cefce71154a660462e09
Birunthan Mohanathas 2013-09-27 16:30:34 -04:00
Parent f389fbf6c9
Commit c22a9fda18
24 changed files with 204 additions and 204 deletions
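
The change is purely mechanical, but the motivation is worth spelling out. NULL in C++ is an integer constant, so it participates in overload resolution and template deduction as an integer; nullptr has its own type (std::nullptr_t) and always behaves as a null pointer. A minimal standalone sketch of the pitfall (the describe() overloads are hypothetical, not from this patch):

#include <cstdio>

static void describe(int)    { std::puts("chose the int overload"); }
static void describe(char *) { std::puts("chose the pointer overload"); }

int main() {
    // NULL commonly expands to 0 or 0L, so passing it binds to the int
    // overload (or is outright ambiguous); nullptr cannot be misread.
    describe(0);        // what describe(NULL) typically resolves to
    describe(nullptr);  // unambiguously the pointer overload
    return 0;
}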

View file

@ -246,7 +246,7 @@ InstDTR::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstDTR*)&i;
return NULL;
return nullptr;
}
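
Every hunk in this file follows the same checked-downcast pattern: isTHIS() tests the instruction encoding and asTHIS() returns a typed pointer on success or nullptr on mismatch, which is exactly where the literal change lands. A self-contained sketch of the shape, with a hypothetical InstExample and an assumed encoding mask standing in for the real subclasses:

#include <cstdint>

struct Instruction {
    uint32_t encoding;
};

struct InstExample : Instruction {
    static bool isTHIS(const Instruction &i) {
        // Assumed mask/signature; each real subclass has its own.
        return (i.encoding & 0x0ff00000) == 0x05900000;
    }
    static const InstExample *asTHIS(const Instruction &i) {
        if (isTHIS(i))
            return static_cast<const InstExample *>(&i);
        return nullptr;  // the caller must null-check before use
    }
};
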
bool
@ -260,7 +260,7 @@ InstLDR::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstLDR*)&i;
return NULL;
return nullptr;
}
InstNOP *
@ -268,7 +268,7 @@ InstNOP::asTHIS(Instruction &i)
{
if (isTHIS(i))
return (InstNOP*) (&i);
return NULL;
return nullptr;
}
bool
@ -288,7 +288,7 @@ InstBranchReg::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstBranchReg*)&i;
return NULL;
return nullptr;
}
void
InstBranchReg::extractDest(Register *dest)
@ -312,7 +312,7 @@ InstBranchImm::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstBranchImm*)&i;
return NULL;
return nullptr;
}
void
@ -332,7 +332,7 @@ InstBXReg::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstBXReg*)&i;
return NULL;
return nullptr;
}
bool
@ -346,7 +346,7 @@ InstBLXReg::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstBLXReg*)&i;
return NULL;
return nullptr;
}
bool
@ -359,7 +359,7 @@ InstBImm::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstBImm*)&i;
return NULL;
return nullptr;
}
bool
@ -373,7 +373,7 @@ InstBLImm::asTHIS(Instruction &i)
{
if (isTHIS(i))
return (InstBLImm*)&i;
return NULL;
return nullptr;
}
bool
@ -386,7 +386,7 @@ InstMovWT::asTHIS(Instruction &i)
{
if (isTHIS(i))
return (InstMovWT*)&i;
return NULL;
return nullptr;
}
void
@ -422,14 +422,14 @@ InstMovW::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstMovW*) (&i);
return NULL;
return nullptr;
}
InstMovT *
InstMovT::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstMovT*) (&i);
return NULL;
return nullptr;
}
bool
@ -443,7 +443,7 @@ InstALU::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstALU*) (&i);
return NULL;
return nullptr;
}
bool
InstALU::isTHIS(const Instruction &i)
@ -493,7 +493,7 @@ InstCMP::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstCMP*) (&i);
return NULL;
return nullptr;
}
bool
@ -507,7 +507,7 @@ InstMOV::asTHIS(const Instruction &i)
{
if (isTHIS(i))
return (InstMOV*) (&i);
return NULL;
return nullptr;
}
bool
@ -742,7 +742,7 @@ uintptr_t
Assembler::getPointer(uint8_t *instPtr)
{
InstructionIterator iter((Instruction*)instPtr);
uintptr_t ret = (uintptr_t)getPtr32Target(&iter, NULL, NULL);
uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr);
return ret;
}
@ -1342,7 +1342,7 @@ Assembler::bytesNeeded() const
BufferOffset
Assembler::writeInst(uint32_t x, uint32_t *dest)
{
if (dest == NULL)
if (dest == nullptr)
return m_buffer.putInt(x);
writeInstStatic(x, dest);
@ -1351,7 +1351,7 @@ Assembler::writeInst(uint32_t x, uint32_t *dest)
void
Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
{
JS_ASSERT(dest != NULL);
JS_ASSERT(dest != nullptr);
*dest = x;
}
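
The writeInst()/writeInstStatic() pair above encodes an append-or-patch convention: a nullptr dest (the default) appends to the instruction stream, while a non-null dest patches an existing slot, and the static variant asserts it was given a real target. A toy reduction of that split, with ToyAssembler and a plain vector standing in for the real buffer:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyAssembler {
    std::vector<uint32_t> buffer;

    // nullptr dest: append; non-null dest: patch in place.
    std::size_t writeInst(uint32_t x, uint32_t *dest = nullptr) {
        if (dest == nullptr) {
            buffer.push_back(x);
            return buffer.size() - 1;
        }
        writeInstStatic(x, dest);
        return static_cast<std::size_t>(dest - buffer.data());
    }

    // The static variant has no stream to append to, so a real
    // destination is mandatory.
    static void writeInstStatic(uint32_t x, uint32_t *dest) {
        assert(dest != nullptr);
        *dest = x;
    }
};
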
@ -1816,7 +1816,7 @@ Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
// don't matter (except the condition code, since that is always preserved across
// patchings) but if it does not get bound later,
// then we want to make sure this is a load from the pool entry (and the pool entry
// should be NULL so it will crash).
// should be nullptr so it will crash).
if (data.isValidPoolHint()) {
dummy->as_dtr(IsLoad, 32, Offset, pc,
DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
@ -2508,7 +2508,7 @@ struct PoolHeader : Instruction {
}
static const PoolHeader *asTHIS(const Instruction &i) {
if (!isTHIS(i))
return NULL;
return nullptr;
return static_cast<const PoolHeader*>(&i);
}
};
@ -2605,7 +2605,7 @@ uint8_t *
Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
{
Instruction *inst = reinterpret_cast<Instruction*>(inst_);
if (count != NULL)
if (count != nullptr)
*count += sizeof(Instruction);
return reinterpret_cast<uint8_t*>(inst->next());
}
@ -2621,7 +2621,7 @@ InstIsGuard(Instruction *inst, const PoolHeader **ph)
return false;
// See if the next instruction is a pool header.
*ph = (inst+1)->as<const PoolHeader>();
return *ph != NULL;
return *ph != nullptr;
}
static bool
@ -2815,7 +2815,7 @@ AutoFlushCache::~AutoFlushCache()
IonSpewCont(IonSpew_CacheFlush, ">", name_);
if (runtime_->flusher() == this) {
IonSpewFin(IonSpew_CacheFlush);
runtime_->setFlusher(NULL);
runtime_->setFlusher(nullptr);
}
}
@ -2833,7 +2833,7 @@ AutoFlushCache::flushAnyway()
if (start_) {
JSC::ExecutableAllocator::cacheFlush((void *)start_, size_t(stop_ - start_ + sizeof(Instruction)));
} else {
JSC::ExecutableAllocator::cacheFlush(NULL, 0xff000000);
JSC::ExecutableAllocator::cacheFlush(nullptr, 0xff000000);
}
used_ = false;
}
@ -2845,4 +2845,4 @@ InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) {
i = i->next();
}
}
Assembler *Assembler::dummy = NULL;
Assembler *Assembler::dummy = nullptr;

View file

@ -1313,7 +1313,7 @@ class Assembler
// Set up the forwards 32 bit region
new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]);
for (int i = 0; i < 4; i++) {
if (pools_[i].poolData == NULL) {
if (pools_[i].poolData == nullptr) {
m_buffer.fail_oom();
return;
}
@ -1359,7 +1359,7 @@ class Assembler
static uintptr_t getPointer(uint8_t *);
template <class Iter>
static const uint32_t * getPtr32Target(Iter *iter, Register *dest = NULL, RelocStyle *rs = NULL);
static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
bool oom() const;
@ -1401,13 +1401,13 @@ class Assembler
size_t bytesNeeded() const;
// Write a blob of binary into the instruction stream *OR*
// into a destination address. If dest is NULL (the default), then the
// into a destination address. If dest is nullptr (the default), then the
// instruction gets written into the instruction stream. If dest is not null
// it is interpreted as a pointer to the location that we want the
// instruction to be written.
BufferOffset writeInst(uint32_t x, uint32_t *dest = NULL);
BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
// A static variant for the cases where we don't want to have an assembler
// object at all. Normally, you would use the dummy (NULL) object.
// object at all. Normally, you would use the dummy (nullptr) object.
static void writeInstStatic(uint32_t x, uint32_t *dest);
public:
@ -1416,10 +1416,10 @@ class Assembler
BufferOffset align(int alignment);
BufferOffset as_nop();
BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = NULL);
ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
BufferOffset as_mov(Register dest,
Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = NULL);
Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
BufferOffset as_mvn(Register dest, Operand2 op2,
SetCond_ sc = NoSetCond, Condition c = Always);
// logical operations
@ -1457,8 +1457,8 @@ class Assembler
// Not quite ALU worthy, but useful nonetheless:
// These also have the issue of being formatted
// completely differently from the standard ALU operations.
BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = NULL);
BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = NULL);
BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
MULOp op, SetCond_ sc, Condition c = Always);
@ -1486,26 +1486,26 @@ class Assembler
// Using an int to differentiate between 8 bits and 32 bits is
// overkill, but meh
BufferOffset as_dtr(LoadStore ls, int size, Index mode,
Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = NULL);
Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr);
// Handles all of the other integral data transferring functions:
// ldrsb, ldrsh, ldrd, etc.
// size is given in bits.
BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = NULL);
Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr);
BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask,
DTMMode mode, DTMWriteBack wb, Condition c = Always);
//overwrite a pool entry with new data.
void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data);
// load a 32 bit immediate from a pool into a register
BufferOffset as_Imm32Pool(Register dest, uint32_t value, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
BufferOffset as_Imm32Pool(Register dest, uint32_t value, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
// make a patchable jump that can target the entire 32 bit address space.
BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
// load a 64 bit floating point immediate from a pool into a register
BufferOffset as_FImm64Pool(VFPRegister dest, double value, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
BufferOffset as_FImm64Pool(VFPRegister dest, double value, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
// load a 32 bit floating point immediate from a pool into a register
BufferOffset as_FImm32Pool(VFPRegister dest, float value, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
BufferOffset as_FImm32Pool(VFPRegister dest, float value, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
// Control flow stuff:
@ -1547,7 +1547,7 @@ class Assembler
isSingle = 0 << 8
};
BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=NULL);
BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr);
// Unityped variants: all registers hold the same (ieee754 single/double)
// notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
@ -1619,7 +1619,7 @@ class Assembler
/* xfer between VFP and memory*/
BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
Condition c = Always /* vfp doesn't have a wb option*/,
uint32_t *dest = NULL);
uint32_t *dest = nullptr);
// VFP's ldm/stm work differently from the standard arm ones.
// You can only transfer a range
@ -1811,7 +1811,7 @@ class Assembler
static uint32_t alignDoubleArg(uint32_t offset) {
return (offset+1)&~1;
}
static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = NULL);
static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
// Toggle a jmp or cmp emitted by toggledJump().
static void ToggleToJmp(CodeLocationLabel inst_);

View file

@ -80,11 +80,11 @@ class IonJSFrameLayout : public IonEntryFrameLayout
return offsetof(IonJSFrameLayout, numActualArgs_);
}
static size_t offsetOfThis() {
IonJSFrameLayout *base = NULL;
IonJSFrameLayout *base = nullptr;
return reinterpret_cast<size_t>(&base->argv()[0]);
}
static size_t offsetOfActualArgs() {
IonJSFrameLayout *base = NULL;
IonJSFrameLayout *base = nullptr;
// +1 to skip |this|.
return reinterpret_cast<size_t>(&base->argv()[1]);
}
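
offsetOfThis() and offsetOfActualArgs() compute member offsets by materializing a null base pointer, because argv() is a computed location rather than a plain field that offsetof could name; the nullptr swap changes nothing about the idiom itself. An isolated sketch (FrameExample is hypothetical; the trick is formally undefined behavior but is what this header relies on):

#include <cstddef>

struct FrameExample {
    void *callee;
    int argc;
    long argv_[4];

    long *argv() { return argv_; }

    // Pretend a frame lives at address zero and read a member's address
    // back as its offset from the frame base.
    static std::size_t offsetOfFirstArg() {
        FrameExample *base = nullptr;
        return reinterpret_cast<std::size_t>(&base->argv()[0]);
    }
};
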
@ -219,15 +219,15 @@ class IonExitFrameLayout : public IonCommonFrameLayout
// each wrapper are pushed before the exit frame. This corresponds exactly
// to the value of the argBase register of the generateVMWrapper function.
inline uint8_t *argBase() {
JS_ASSERT(footer()->ionCode() != NULL);
JS_ASSERT(footer()->ionCode() != nullptr);
return top();
}
inline bool isWrapperExit() {
return footer()->function() != NULL;
return footer()->function() != nullptr;
}
inline bool isNativeExit() {
return footer()->ionCode() == NULL;
return footer()->ionCode() == nullptr;
}
inline bool isOOLNativeExit() {
return footer()->ionCode() == ION_FRAME_OOL_NATIVE;

View file

@ -292,7 +292,7 @@ class LTableSwitch : public LInstructionHelper<0, 1, 1>
}
// This is added to share the same CodeGenerator prefixes.
const LAllocation *tempPointer() {
return NULL;
return nullptr;
}
};
@ -323,7 +323,7 @@ class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
return getTemp(1)->output();
}
const LAllocation *tempPointer() {
return NULL;
return nullptr;
}
};

View file

@ -365,11 +365,11 @@ MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
// Going to have to use a load. If the operation is a move, then just move it into the
// destination register
if (op == op_mov) {
as_Imm32Pool(dest, imm.value, NULL, c);
as_Imm32Pool(dest, imm.value, nullptr, c);
return;
} else {
// If this isn't just going into a register, then stick it in a temp, and then proceed.
as_Imm32Pool(ScratchRegister, imm.value, NULL, c);
as_Imm32Pool(ScratchRegister, imm.value, nullptr, c);
}
}
as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
@ -398,8 +398,8 @@ MacroAssemblerARM::ma_nop()
Instruction *
NextInst(Instruction *i)
{
if (i == NULL)
return NULL;
if (i == nullptr)
return nullptr;
return i->next();
}
@ -419,15 +419,15 @@ MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Conditi
switch(rs) {
case L_MOVWT:
as_movw(dest, Imm16(imm & 0xffff), c, i);
// i can be NULL here. that just means "insert in the next in sequence."
// NextInst is special cased to not do anything when it is passed NULL, so two
// consecutive instructions will be inserted.
// i can be nullptr here. that just means "insert in the next in sequence."
// NextInst is special cased to not do anything when it is passed nullptr, so
// two consecutive instructions will be inserted.
i = NextInst(i);
as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i);
break;
case L_LDR:
if(i == NULL)
as_Imm32Pool(dest, imm, NULL, c);
if(i == nullptr)
as_Imm32Pool(dest, imm, nullptr, c);
else
as_WritePoolEntry(i, c, imm);
break;
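
The comment above documents a convention that the nullptr spelling makes more visible: a null insertion point means "emit at the next position in the stream", and NextInst() deliberately propagates nullptr so a two-instruction sequence (movw/movt) stays in append mode end to end. A hedged reduction of that convention, with Emitter/emitAt as hypothetical stand-ins for the assembler and as_movw/as_movt:

#include <cstdint>
#include <vector>

struct Emitter {
    std::vector<uint32_t> stream;

    // Mirrors NextInst(): nullptr in, nullptr out, so "append" mode is
    // preserved across consecutive emissions.
    static uint32_t *nextInst(uint32_t *i) {
        if (i == nullptr)
            return nullptr;
        return i + 1;
    }

    void emitAt(uint32_t word, uint32_t *i) {
        if (i == nullptr)
            stream.push_back(word);  // append in sequence
        else
            *i = word;               // overwrite an existing instruction
    }

    void emitPair(uint32_t lo, uint32_t hi, uint32_t *i = nullptr) {
        emitAt(lo, i);
        i = nextInst(i);             // still nullptr when appending
        emitAt(hi, i);
    }
};
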
@ -1319,11 +1319,11 @@ MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Conditi
as_bx(ScratchRegister, c);
break;
case Assembler::B_LDR_BX:
as_Imm32Pool(ScratchRegister, trg, NULL, c);
as_Imm32Pool(ScratchRegister, trg, nullptr, c);
as_bx(ScratchRegister, c);
break;
case Assembler::B_LDR:
as_Imm32Pool(pc, trg, NULL, c);
as_Imm32Pool(pc, trg, nullptr, c);
if (c == Always)
m_buffer.markGuard();
break;
@ -1477,7 +1477,7 @@ MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc)
}
}
// Fall back to putting the value in a pool.
as_FImm64Pool(dest, value, NULL, cc);
as_FImm64Pool(dest, value, nullptr, cc);
}
static inline uint32_t
@ -1505,7 +1505,7 @@ MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc)
}
}
// Fall back to putting the value in a pool.
as_FImm32Pool(vd, value, NULL, cc);
as_FImm32Pool(vd, value, nullptr, cc);
}
void

View file

@ -88,9 +88,9 @@ class MacroAssemblerARM : public Assembler
SetCond_ sc = NoSetCond, Condition c = Always);
void ma_nop();
void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
RelocStyle rs, Instruction *i = NULL);
RelocStyle rs, Instruction *i = nullptr);
void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
RelocStyle rs, Instruction *i = NULL);
RelocStyle rs, Instruction *i = nullptr);
// These should likely be wrapped up as a set of macros
// or something like that. I cannot think of a good reason
// to explicitly have all of this code.

View file

@ -472,7 +472,7 @@ IonRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// Call the target function.
// Note that this code assumes the function is JITted.
masm.ma_ldr(DTRAddr(r1, DtrOffImm(JSFunction::offsetOfNativeOrScript())), r3);
masm.loadBaselineOrIonRaw(r3, r3, mode, NULL);
masm.loadBaselineOrIonRaw(r3, r3, mode, nullptr);
masm.ma_callIonHalfPush(r3);
uint32_t returnOffset = masm.currentOffset();
@ -819,12 +819,12 @@ IonRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
Linker linker(masm);
IonCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
if (!wrapper)
return NULL;
return nullptr;
// linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
// use relookupOrAdd instead of add.
if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
return NULL;
return nullptr;
#ifdef JS_ION_PERF
writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
@ -890,14 +890,14 @@ IonRuntime::generateDebugTrapHandler(JSContext *cx)
masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
// Enter a stub frame and call the HandleDebugTrap VM function. Ensure
// the stub frame has a NULL ICStub pointer, since this pointer is marked
// during GC.
masm.movePtr(ImmPtr(NULL), BaselineStubReg);
// the stub frame has a nullptr ICStub pointer, since this pointer is
// marked during GC.
masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
EmitEnterStubFrame(masm, scratch2);
IonCode *code = cx->runtime()->ionRuntime()->getVMWrapper(HandleDebugTrapInfo);
if (!code)
return NULL;
return nullptr;
masm.push(lr);
masm.push(scratch1);

View file

@ -110,7 +110,7 @@ IsCompilingAsmJS()
{
// asm.js compilation pushes an IonContext with a null JSCompartment.
IonContext *ictx = MaybeGetIonContext();
return ictx && ictx->compartment == NULL;
return ictx && ictx->compartment == nullptr;
}
#endif
@ -170,7 +170,7 @@ struct PatchedImmPtr {
void *value;
explicit PatchedImmPtr()
: value(NULL)
: value(nullptr)
{ }
explicit PatchedImmPtr(const void *value)
: value(const_cast<void*>(value))
@ -232,7 +232,7 @@ struct PatchedAbsoluteAddress {
void *addr;
explicit PatchedAbsoluteAddress()
: addr(NULL)
: addr(nullptr)
{ }
explicit PatchedAbsoluteAddress(const void *addr)
: addr(const_cast<void*>(addr))
@ -554,7 +554,7 @@ class CodeLocationJump
public:
CodeLocationJump() {
raw_ = NULL;
raw_ = nullptr;
setUninitialized();
#ifdef JS_SMALL_BRANCH
jumpTableEntry_ = (uint8_t *) 0xdeadab1e;
@ -573,7 +573,7 @@ class CodeLocationJump
#endif
}
void repoint(IonCode *code, MacroAssembler* masm = NULL);
void repoint(IonCode *code, MacroAssembler* masm = nullptr);
uint8_t *raw() const {
JS_ASSERT(state_ == Absolute);
@ -618,7 +618,7 @@ class CodeLocationLabel
public:
CodeLocationLabel() {
raw_ = NULL;
raw_ = nullptr;
setUninitialized();
}
CodeLocationLabel(IonCode *code, CodeOffsetLabel base) {
@ -642,7 +642,7 @@ class CodeLocationLabel
return raw_ - other.raw_;
}
void repoint(IonCode *code, MacroAssembler *masm = NULL);
void repoint(IonCode *code, MacroAssembler *masm = nullptr);
#ifdef DEBUG
bool isSet() const {

View file

@ -148,5 +148,5 @@ AutoFlushCache::~AutoFlushCache()
return;
if (runtime_->flusher() == this)
runtime_->setFlusher(NULL);
runtime_->setFlusher(nullptr);
}

View file

@ -72,11 +72,11 @@ class BaselineCompilerShared
ICEntry *allocateICEntry(ICStub *stub, bool isForOp) {
if (!stub)
return NULL;
return nullptr;
// Create the entry and add it to the vector.
if (!icEntries_.append(ICEntry((uint32_t) (pc - script->code), isForOp)))
return NULL;
return nullptr;
ICEntry &vecEntry = icEntries_[icEntries_.length() - 1];
// Set the first stub for the IC entry to the fallback stub

View file

@ -35,13 +35,13 @@ CodeGeneratorShared::ensureMasm(MacroAssembler *masmArg)
}
CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masmArg)
: oolIns(NULL),
: oolIns(nullptr),
maybeMasm_(),
masm(ensureMasm(masmArg)),
gen(gen),
graph(*graph),
current(NULL),
deoptTable_(NULL),
current(nullptr),
deoptTable_(nullptr),
#ifdef DEBUG
pushedArgs_(0),
#endif
@ -99,7 +99,7 @@ CodeGeneratorShared::generateOutOfLineCode()
if (!outOfLineCode_[i]->generate(this))
return false;
}
oolIns = NULL;
oolIns = nullptr;
return true;
}
@ -114,7 +114,7 @@ CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code)
if (oolIns)
code->setSource(oolIns->script(), oolIns->pc());
else
code->setSource(current ? current->mir()->info().script() : NULL, lastPC_);
code->setSource(current ? current->mir()->info().script() : nullptr, lastPC_);
return outOfLineCode_.append(code);
}
@ -681,7 +681,7 @@ CodeGeneratorShared::oolTruncateDouble(const FloatRegister &src, const Register
{
OutOfLineTruncateSlow *ool = new OutOfLineTruncateSlow(src, dest);
if (!addOutOfLineCode(ool))
return NULL;
return nullptr;
return ool;
}
@ -760,7 +760,7 @@ CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicB
{
OutOfLineAbortPar *ool = new OutOfLineAbortPar(cause, basicBlock, bytecode);
if (!ool || !addOutOfLineCode(ool))
return NULL;
return nullptr;
return ool;
}
@ -784,21 +784,21 @@ CodeGeneratorShared::oolPropagateAbortPar(LInstruction *lir)
{
OutOfLinePropagateAbortPar *ool = new OutOfLinePropagateAbortPar(lir);
if (!ool || !addOutOfLineCode(ool))
return NULL;
return nullptr;
return ool;
}
bool
OutOfLineAbortPar::generate(CodeGeneratorShared *codegen)
{
codegen->callTraceLIR(0xDEADBEEF, NULL, "AbortPar");
codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
return codegen->visitOutOfLineAbortPar(this);
}
bool
OutOfLinePropagateAbortPar::generate(CodeGeneratorShared *codegen)
{
codegen->callTraceLIR(0xDEADBEEF, NULL, "AbortPar");
codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
return codegen->visitOutOfLinePropagateAbortPar(this);
}
@ -834,15 +834,15 @@ CodeGeneratorShared::callTraceLIR(uint32_t blockIndex, LInstruction *lir,
script = mir->block()->info().script();
pc = mir->trackedPc();
} else {
mirOpName = NULL;
script = NULL;
pc = NULL;
mirOpName = nullptr;
script = nullptr;
pc = nullptr;
}
} else {
blockIndex = lirIndex = 0xDEADBEEF;
lirOpName = mirOpName = bailoutName;
script = NULL;
pc = NULL;
script = nullptr;
pc = nullptr;
}
masm.store32(Imm32(blockIndex),
@ -892,12 +892,12 @@ CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock *mir)
// The interrupt check should be the first instruction in the
// loop header other than the initial label and move groups.
JS_ASSERT(iter->isInterruptCheck() || iter->isCheckInterruptPar());
return NULL;
return nullptr;
}
}
}
return NULL;
return nullptr;
}
void

View file

@ -364,13 +364,13 @@ class CodeGeneratorShared : public LInstructionVisitor
masm.storeCallResultValue(t);
}
bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = NULL);
bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = nullptr);
template <class ArgSeq, class StoreOutputTo>
inline OutOfLineCode *oolCallVM(const VMFunction &fun, LInstruction *ins, const ArgSeq &args,
const StoreOutputTo &out);
bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = NULL) {
bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = nullptr) {
return callVM(f[gen->info().executionMode()], ins, dynStack);
}
@ -411,7 +411,7 @@ class CodeGeneratorShared : public LInstructionVisitor
bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);
public:
bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = NULL);
bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = nullptr);
// Parallel aborts:
//
@ -445,8 +445,8 @@ class OutOfLineCode : public TempObject
public:
OutOfLineCode()
: framePushed_(0),
pc_(NULL),
script_(NULL)
pc_(nullptr),
script_(nullptr)
{ }
virtual bool generate(CodeGeneratorShared *codegen) = 0;
@ -672,7 +672,7 @@ CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const A
{
OutOfLineCode *ool = new OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
if (!addOutOfLineCode(ool))
return NULL;
return nullptr;
return ool;
}

View file

@ -795,7 +795,7 @@ CodeGeneratorX86Shared::visitDivI(LDivI *ins)
JS_ASSERT(output == eax);
Label done;
ReturnZero *ool = NULL;
ReturnZero *ool = nullptr;
// Handle divide by zero.
if (mir->canBeDivideByZero()) {
@ -952,8 +952,8 @@ CodeGeneratorX86Shared::visitModI(LModI *ins)
}
Label done;
ReturnZero *ool = NULL;
ModOverflowCheck *overflow = NULL;
ReturnZero *ool = nullptr;
ModOverflowCheck *overflow = nullptr;
// Prevent divide by zero.
if (ins->mir()->canBeDivideByZero()) {

View file

@ -62,8 +62,8 @@ struct BufferSlice {
BufferSlice *getNext() { return this->next; }
BufferSlice *getPrev() { return this->prev; }
void setNext(BufferSlice<SliceSize> *next_) {
JS_ASSERT(this->next == NULL);
JS_ASSERT(next_->prev == NULL);
JS_ASSERT(this->next == nullptr);
JS_ASSERT(next_->prev == nullptr);
this->next = next_;
next_->prev = this;
}
@ -72,9 +72,9 @@ struct BufferSlice {
unsigned int size() {
return nodeSize;
}
BufferSlice() : prev(NULL), next(NULL), nodeSize(0) {}
BufferSlice() : prev(nullptr), next(nullptr), nodeSize(0) {}
void putBlob(uint32_t instSize, uint8_t* inst) {
if (inst != NULL)
if (inst != nullptr)
memcpy(&instructions[size()], inst, instSize);
nodeSize += instSize;
}
@ -84,7 +84,7 @@ template<int SliceSize, class Inst>
struct AssemblerBuffer
{
public:
AssemblerBuffer() : head(NULL), tail(NULL), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
protected:
typedef BufferSlice<SliceSize> Slice;
typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
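
The buffer is a chain of fixed-size slices: prev/next start out as nullptr, setNext() asserts it only ever splices a fresh slice onto the current tail, and head == nullptr marks an empty buffer. A minimal sketch of the invariants those assertions protect (SliceExample is a hypothetical reduction of BufferSlice):

#include <cassert>
#include <cstdint>

template <int SliceSize>
struct SliceExample {
    SliceExample *prev = nullptr;
    SliceExample *next = nullptr;
    uint8_t instructions[SliceSize];

    void setNext(SliceExample *n) {
        assert(next == nullptr);     // this slice must be the current tail
        assert(n->prev == nullptr);  // the new slice must be unlinked
        next = n;
        n->prev = this;
    }
};
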
@ -105,23 +105,23 @@ struct AssemblerBuffer
Slice *tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
if (!tmp) {
m_oom = true;
return NULL;
return nullptr;
}
new (tmp) Slice;
return tmp;
}
bool ensureSpace(int size) {
if (tail != NULL && tail->size()+size <= SliceSize)
if (tail != nullptr && tail->size()+size <= SliceSize)
return true;
Slice *tmp = newSlice(LifoAlloc_);
if (tmp == NULL)
if (tmp == nullptr)
return false;
if (tail != NULL) {
if (tail != nullptr) {
bufferSize += tail->size();
tail->setNext(tmp);
}
tail = tmp;
if (head == NULL) {
if (head == nullptr) {
finger = tmp;
finger_offset = 0;
head = tmp;
@ -149,7 +149,7 @@ struct AssemblerBuffer
}
unsigned int size() const {
int executableSize;
if (tail != NULL)
if (tail != nullptr)
executableSize = bufferSize + tail->size();
else
executableSize = bufferSize;
@ -177,7 +177,7 @@ struct AssemblerBuffer
int local_off = off.getOffset();
// don't update the structure's finger in place, so there is the option
// to not update it.
Slice *cur = NULL;
Slice *cur = nullptr;
int cur_off;
// get the offset that we'd be dealing with by walking through backwards
int end_off = bufferSize - local_off;
@ -204,16 +204,16 @@ struct AssemblerBuffer
}
int count = 0;
if (local_off < cur_off) {
for (; cur != NULL; cur = cur->getPrev(), cur_off -= cur->size()) {
for (; cur != nullptr; cur = cur->getPrev(), cur_off -= cur->size()) {
if (local_off >= cur_off) {
local_off -= cur_off;
break;
}
count++;
}
JS_ASSERT(cur != NULL);
JS_ASSERT(cur != nullptr);
} else {
for (; cur != NULL; cur = cur->getNext()) {
for (; cur != nullptr; cur = cur->getNext()) {
int cur_size = cur->size();
if (local_off < cur_off + cur_size) {
local_off -= cur_off;
@ -222,7 +222,7 @@ struct AssemblerBuffer
cur_off += cur_size;
count++;
}
JS_ASSERT(cur != NULL);
JS_ASSERT(cur != nullptr);
}
if (count > 2 || used_finger) {
finger = cur;
@ -233,7 +233,7 @@ struct AssemblerBuffer
return (Inst*)&cur->instructions[local_off];
}
BufferOffset nextOffset() const {
if (tail != NULL)
if (tail != nullptr)
return BufferOffset(bufferSize + tail->size());
else
return BufferOffset(bufferSize);
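
getInst() above walks the slice chain from a cached "finger" (the last slice visited), moving forward or backward toward the requested offset, and only repoints the finger when the walk was long enough to pay for it. A compact restatement of that caching decision (Slice and find are hypothetical reductions):

struct Slice {
    Slice *next;
    int size;
};

struct FingerCache {
    Slice *finger = nullptr;
    int fingerOffset = 0;

    // Walk forward from start until the slice containing target is found;
    // cache the result only if the walk crossed more than two links.
    Slice *find(Slice *start, int startOffset, int target) {
        Slice *cur = start;
        int off = startOffset;
        int count = 0;
        while (cur != nullptr && target >= off + cur->size) {
            off += cur->size;
            cur = cur->next;
            count++;
        }
        if (count > 2) {
            finger = cur;
            fingerOffset = off;
        }
        return cur;
    }
};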

View file

@ -51,7 +51,7 @@ struct Pool
int limitingUsee;
Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_,
bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = NULL)
bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = nullptr)
: maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_),
bias(bias_), alignment(alignment_),
isBackref(isBackref_), canDedup(canDedup_), other(other_),
@ -135,7 +135,7 @@ struct Pool
buffSize <<= 1;
uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize));
memcpy(tmp, poolData, immSize * numEntries);
if (poolData == NULL) {
if (poolData == nullptr) {
buffSize = 0;
return -1;
}
@ -150,11 +150,11 @@ struct Pool
numEntries = 0;
buffSize = 8;
poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize));
if (poolData == NULL)
if (poolData == nullptr)
return false;
void *otherSpace = a.alloc(sizeof(Pool));
if (otherSpace == NULL)
if (otherSpace == nullptr)
return false;
other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize,
@ -212,7 +212,7 @@ struct BufferSliceTail : public BufferSlice<SliceSize> {
BufferSliceTail *getNext() {
return (BufferSliceTail *)this->next;
}
BufferSliceTail() : data(NULL), isNatural(true) {
BufferSliceTail() : data(nullptr), isNatural(true) {
memset(isBranch, 0, sizeof(isBranch));
}
void markNextAsBranch() {
@ -365,7 +365,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice)));
if (!tmp) {
this->m_oom = true;
return NULL;
return nullptr;
}
new (tmp) BufferSlice;
return tmp;
@ -376,9 +376,9 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
footerSize(footerSize_),
pools(pools_),
instBufferAlign(instBufferAlign_), numDumps(0),
poolInfo(NULL),
poolInfo(nullptr),
poolSize(0), canNotPlacePool(0), inBackref(false),
perforatedNode(NULL), id(-1)
perforatedNode(nullptr), id(-1)
{
for (int idx = 0; idx < numPoolKinds; idx++) {
entryCount[idx] = 0;
@ -411,7 +411,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
int curIndex = 0;
int curInstOffset = 0;
JS_ASSERT(start == dest);
for (BufferSlice * cur = *getHead(); cur != NULL; cur = cur->getNext()) {
for (BufferSlice * cur = *getHead(); cur != nullptr; cur = cur->getNext()) {
Chunk *src = (Chunk*)cur->instructions;
for (unsigned int idx = 0; idx <cur->size()/InstBaseSize;
idx++, curInstOffset += InstBaseSize) {
@ -423,7 +423,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
memcpy(&dest[idx], &src[idx], sizeof(Chunk));
}
dest+=cur->size()/InstBaseSize;
if (cur->data != NULL) {
if (cur->data != nullptr) {
// have the repatcher move on to the next pool
curIndex ++;
// loop over all of the pools, copying them into place.
@ -454,11 +454,11 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
}
BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = NULL) {
BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = nullptr) {
if (this->oom() && !this->bail())
return BufferOffset();
int token;
if (p != NULL) {
if (p != nullptr) {
int poolId = p - pools;
const char sigil = inBackref ? 'B' : 'F';
@ -474,7 +474,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
token = insertEntryForwards(instSize, inst, p, data);
// now to get an instruction to write
PoolEntry retPE;
if (p != NULL) {
if (p != nullptr) {
if (this->oom())
return BufferOffset();
int poolId = p - pools;
@ -487,7 +487,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
entryCount[poolId]++;
}
// Now inst is a valid thing to insert into the instruction stream
if (pe != NULL)
if (pe != nullptr)
*pe = retPE;
return this->putBlob(instSize, inst);
}
@ -497,7 +497,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// anything into a pool after a pool has been placed, we don't affect
// anything relevant, so we can skip this check entirely!
if (p == NULL)
if (p == nullptr)
return INT_MIN;
// TODO: calculating offsets for the alignment requirements is *hard*
// Instead, assume that we always add the maximum.
@ -516,7 +516,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
// uh-oh, the backwards pool is full. Time to finalize it, and
// switch to a new forward pool.
if (p != NULL)
if (p != nullptr)
IonSpew(IonSpew_Pools, "[%d]Inserting pool entry caused a spill", id);
else
IonSpew(IonSpew_Pools, "[%d]Inserting instruction(%d) caused a spill", id, size());
@ -573,7 +573,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
if (tmp->checkFull(poolOffset)) {
// uh-oh. DUMP DUMP DUMP
if (p != NULL)
if (p != nullptr)
IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
else
IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());
@ -587,13 +587,13 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
nextOffset += tmp->immSize * tmp->numEntries;
}
if (p == NULL) {
if (p == nullptr) {
return INT_MIN;
}
return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
}
BufferOffset putInt(uint32_t value) {
return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, NULL, NULL);
return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr);
}
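
putInt() shows the convention the whole insertEntry() path relies on: a nullptr Pool means "no constant-pool entry, just raw instruction bytes", so the pool bookkeeping and the PoolEntry out-parameter are skipped entirely (and insertEntryForwards above returns its INT_MIN sentinel). A toy reduction of that contract (ToyPool/ToyBuffer are hypothetical):

#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyPool {
    int numEntries = 0;
};

struct ToyBuffer {
    std::vector<uint8_t> stream;

    std::size_t insertEntry(const uint8_t *inst, std::size_t instSize,
                            ToyPool *p, int *pe = nullptr) {
        if (p != nullptr) {
            p->numEntries++;          // only real pools get an entry...
            if (pe != nullptr)
                *pe = p->numEntries;  // ...reported only when asked for
        }
        stream.insert(stream.end(), inst, inst + instSize);
        return stream.size() - instSize;
    }

    // Raw data: no pool, no pool entry.
    std::size_t putInt(uint32_t value) {
        return insertEntry(reinterpret_cast<uint8_t *>(&value),
                           sizeof(value), nullptr, nullptr);
    }
};
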
// Mark the current section as an area where we can
// later go to dump a pool
@ -638,7 +638,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
poolIsEmpty = false;
break;
}
if (pools[poolIdx].other != NULL && pools[poolIdx].other->numEntries != 0) {
if (pools[poolIdx].other != nullptr && pools[poolIdx].other->numEntries != 0) {
poolIsEmpty = false;
break;
}
@ -676,17 +676,17 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// The code below also creates a new pool, but that is not necessary, since
// the pools have not been modified at all.
new (&perforation) BufferOffset();
perforatedNode = NULL;
perforatedNode = nullptr;
inBackref = false;
IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id);
// Bail out early, since we don't want to even pretend these pools exist.
return;
}
JS_ASSERT(perforatedNode != NULL);
JS_ASSERT(perforatedNode != nullptr);
if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) {
// need to resize.
PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
if (tmp == NULL) {
if (tmp == nullptr) {
this->fail_oom();
return;
}
@ -712,7 +712,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
int skippedBytes = 0;
for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
Pool *p = pools[poolIdx].other;
JS_ASSERT(p != NULL);
JS_ASSERT(p != nullptr);
unsigned int idx = p->numEntries-1;
// Allocate space for tracking information that needs to be propagated to the next pool
// as well as space for quickly updating the pool entries in the current pool to remove
@ -786,7 +786,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// bind the current pool to the perforation point.
Pool **tmp = &perforatedNode->data;
*tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
if (tmp == NULL) {
if (tmp == nullptr) {
this->fail_oom();
return;
}
@ -807,7 +807,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
}
new (&perforation) BufferOffset();
perforatedNode = NULL;
perforatedNode = nullptr;
inBackref = false;
// Now that the backwards pool has been emptied, and a new forward pool
@ -868,7 +868,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
BufferOffset branch = this->nextOffset();
bool shouldMarkAsBranch = this->isNextBranch();
this->markNextAsBranch();
this->putBlob(guardSize, NULL);
this->putBlob(guardSize, nullptr);
BufferOffset afterPool = this->nextOffset();
Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
markGuard();
@ -1012,11 +1012,11 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
BufferOffset branch = this->nextOffset();
this->markNextAsBranch();
this->putBlob(guardSize, NULL);
this->putBlob(guardSize, nullptr);
BufferOffset afterPool = this->nextOffset();
Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
markGuard();
if (perforatedNode != NULL)
if (perforatedNode != nullptr)
perforatedNode->isNatural = false;
}
canNotPlacePool++;
@ -1035,11 +1035,11 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// the node, then whoops, we want to mark the first instruction of
// the next node.
this->ensureSpace(InstBaseSize);
JS_ASSERT(*this->getTail() != NULL);
JS_ASSERT(*this->getTail() != nullptr);
(*this->getTail())->markNextAsBranch();
}
bool isNextBranch() {
JS_ASSERT(*this->getTail() != NULL);
JS_ASSERT(*this->getTail() != nullptr);
return (*this->getTail())->isNextBranch();
}
@ -1066,7 +1066,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
private:
void getPEPool(PoolEntry pe, Pool **retP, int32_t * retOffset, int32_t *poolNum) const {
int poolKind = pe.poolKind();
Pool *p = NULL;
Pool *p = nullptr;
uint32_t offset = pe.offset() * pools[poolKind].immSize;
int idx;
for (idx = 0; idx < numDumps; idx++) {
@ -1078,21 +1078,21 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
if (p->getPoolSize() > offset)
break;
offset -= p->getPoolSize();
p = NULL;
p = nullptr;
}
if (poolNum != NULL)
if (poolNum != nullptr)
*poolNum = idx;
// If this offset is contained in any finished pool, forward or backwards, p now
// points to that pool; if it is not in any pool (it should be in the currently building pool)
// then p is NULL.
if (p == NULL) {
// then p is nullptr.
if (p == nullptr) {
p = &pools[poolKind];
if (offset >= p->getPoolSize()) {
p = p->other;
offset -= p->getPoolSize();
}
}
JS_ASSERT(p != NULL);
JS_ASSERT(p != nullptr);
JS_ASSERT(offset < p->getPoolSize());
*retP = p;
*retOffset = offset;
@ -1100,7 +1100,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
uint8_t *getPoolEntry(PoolEntry pe) {
Pool *p;
int32_t offset;
getPEPool(pe, &p, &offset, NULL);
getPEPool(pe, &p, &offset, nullptr);
return &p->poolData[offset];
}
size_t getPoolEntrySize(PoolEntry pe) {

View file

@ -70,11 +70,11 @@ class IonJSFrameLayout : public IonCommonFrameLayout
return offsetof(IonJSFrameLayout, numActualArgs_);
}
static size_t offsetOfThis() {
IonJSFrameLayout *base = NULL;
IonJSFrameLayout *base = nullptr;
return reinterpret_cast<size_t>(&base->argv()[0]);
}
static size_t offsetOfActualArgs() {
IonJSFrameLayout *base = NULL;
IonJSFrameLayout *base = nullptr;
// +1 to skip |this|.
return reinterpret_cast<size_t>(&base->argv()[1]);
}
@ -184,15 +184,15 @@ class IonExitFrameLayout : public IonCommonFrameLayout
// each wrapper are pushed before the exit frame. This corresponds exactly
// to the value of the argBase register of the generateVMWrapper function.
inline uint8_t *argBase() {
JS_ASSERT(footer()->ionCode() != NULL);
JS_ASSERT(footer()->ionCode() != nullptr);
return top();
}
inline bool isWrapperExit() {
return footer()->function() != NULL;
return footer()->function() != nullptr;
}
inline bool isNativeExit() {
return footer()->ionCode() == NULL;
return footer()->ionCode() == nullptr;
}
inline bool isOOLNativeExit() {
return footer()->ionCode() == ION_FRAME_OOL_NATIVE;

View file

@ -32,7 +32,7 @@ class LDivI : public LBinaryMath<1>
}
if (mir()->canBeNegativeZero())
return mir()->canBeNegativeOverflow() ? "NegativeZero_NegativeOverflow" : "NegativeZero";
return mir()->canBeNegativeOverflow() ? "NegativeOverflow" : NULL;
return mir()->canBeNegativeOverflow() ? "NegativeOverflow" : nullptr;
}
const LDefinition *remainder() {
@ -103,7 +103,7 @@ class LModI : public LBinaryMath<1>
}
const char *extraName() const {
return mir()->isTruncated() ? "Truncated" : NULL;
return mir()->isTruncated() ? "Truncated" : nullptr;
}
const LDefinition *remainder() {
@ -281,7 +281,7 @@ class LMulI : public LBinaryMath<0, 1>
const char *extraName() const {
return (mir()->mode() == MMul::Integer)
? "Integer"
: (mir()->canBeNegativeZero() ? "CanBeNegativeZero" : NULL);
: (mir()->canBeNegativeZero() ? "CanBeNegativeZero" : nullptr);
}
MMul *mir() const {

View file

@ -62,11 +62,11 @@ LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKi
{
LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
if (!snapshot)
return NULL;
return nullptr;
FlattenedMResumePointIter iter(rp);
if (!iter.init())
return NULL;
return nullptr;
size_t i = 0;
for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
@ -120,11 +120,11 @@ LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKi
{
LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
if (!snapshot)
return NULL;
return nullptr;
FlattenedMResumePointIter iter(rp);
if (!iter.init())
return NULL;
return nullptr;
size_t i = 0;
for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {

View file

@ -38,8 +38,8 @@ class LIRGeneratorShared : public MInstructionVisitorWithDefaults
: gen(gen),
graph(graph),
lirGraph_(lirGraph),
lastResumePoint_(NULL),
osiPoint_(NULL)
lastResumePoint_(nullptr),
osiPoint_(nullptr)
{ }
MIRGenerator *mir() {
@ -143,14 +143,14 @@ class LIRGeneratorShared : public MInstructionVisitorWithDefaults
}
template <typename T> void annotate(T *ins);
template <typename T> bool add(T *ins, MInstruction *mir = NULL);
template <typename T> bool add(T *ins, MInstruction *mir = nullptr);
void lowerTypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
bool defineTypedPhi(MPhi *phi, size_t lirIndex);
LOsiPoint *popOsiPoint() {
LOsiPoint *tmp = osiPoint_;
osiPoint_ = NULL;
osiPoint_ = nullptr;
return tmp;
}

View file

@ -96,7 +96,7 @@ Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
void
Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
{
JS_ASSERT(target.value != NULL);
JS_ASSERT(target.value != nullptr);
// Emit reloc before modifying the jump table, since it computes a 0-based
// index. This jump is not patchable at runtime.
@ -113,7 +113,7 @@ Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
writeRelocation(src, reloc);
size_t index = jumps_.length();
enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), NULL, reloc));
enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
return index;
}
@ -176,9 +176,9 @@ Assembler::executableCopy(uint8_t *buffer)
RelativePatch &rp = jumps_[i];
uint8_t *src = buffer + rp.offset;
if (!rp.target) {
// The patch target is NULL for jumps that have been linked to a
// label within the same code block, but may be repatched later to
// jump to a different code block.
// The patch target is nullptr for jumps that have been linked to
// a label within the same code block, but may be repatched later
// to jump to a different code block.
continue;
}
if (JSC::X86Assembler::canRelinkJump(src, rp.target)) {
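
The reworded comment above pins down what a nullptr patch target means in the jump table: the jump is already linked to a label inside the same code block, so executableCopy() leaves its bytes alone, while jumps with a concrete target get relinked. A sketch of that skip-or-patch loop (Patch and applyJumps are hypothetical reductions):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Patch {
    std::size_t offset;
    void *target;  // nullptr: intra-block jump, already linked
};

static void applyJumps(uint8_t *buffer, const std::vector<Patch> &jumps) {
    for (const Patch &rp : jumps) {
        if (!rp.target)
            continue;  // same-block jump: nothing to repatch
        uint8_t *src = buffer + rp.offset;
        (void)src;     // real code would relink src to rp.target here
    }
}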

View file

@ -246,7 +246,7 @@ IonRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
// Baseline OSR will return here.
masm.bind(returnLabel.src());
if (!masm.addCodeLabel(returnLabel))
return NULL;
return nullptr;
}
// Pop arguments and padding from stack.
@ -409,7 +409,7 @@ IonRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// Call the target function.
// Note that this code assumes the function is JITted.
masm.loadPtr(Address(rax, JSFunction::offsetOfNativeOrScript()), rax);
masm.loadBaselineOrIonRaw(rax, rax, mode, NULL);
masm.loadBaselineOrIonRaw(rax, rax, mode, nullptr);
masm.call(rax);
uint32_t returnOffset = masm.currentOffset();
@ -675,7 +675,7 @@ IonRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
Linker linker(masm);
IonCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
if (!wrapper)
return NULL;
return nullptr;
#ifdef JS_ION_PERF
writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
@ -684,7 +684,7 @@ IonRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
// linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
// use relookupOrAdd instead of add.
if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
return NULL;
return nullptr;
return wrapper;
}
@ -744,14 +744,14 @@ IonRuntime::generateDebugTrapHandler(JSContext *cx)
masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
// Enter a stub frame and call the HandleDebugTrap VM function. Ensure
// the stub frame has a NULL ICStub pointer, since this pointer is marked
// the stub frame has a nullptr ICStub pointer, since this pointer is marked
// during GC.
masm.movePtr(ImmPtr(NULL), BaselineStubReg);
masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
EmitEnterStubFrame(masm, scratch3);
IonCode *code = cx->runtime()->ionRuntime()->getVMWrapper(HandleDebugTrapInfo);
if (!code)
return NULL;
return nullptr;
masm.push(scratch1);
masm.push(scratch2);

View file

@ -455,7 +455,7 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic
Register ptr = ToRegister(ins->ptr());
const LDefinition *out = ins->output();
OutOfLineLoadTypedArrayOutOfBounds *ool = NULL;
OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
if (!mir->fallible()) {
ool = new OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out));
if (!addOutOfLineCode(ool))

View file

@ -24,7 +24,7 @@ MacroAssemblerX86::getDouble(double d)
if (!doubleMap_.initialized()) {
enoughMemory_ &= doubleMap_.init();
if (!enoughMemory_)
return NULL;
return nullptr;
}
size_t doubleIndex;
DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d);
@ -35,7 +35,7 @@ MacroAssemblerX86::getDouble(double d)
enoughMemory_ &= doubles_.append(Double(d));
enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
if (!enoughMemory_)
return NULL;
return nullptr;
}
Double &dbl = doubles_[doubleIndex];
JS_ASSERT(!dbl.uses.bound());
@ -70,7 +70,7 @@ MacroAssemblerX86::getFloat(float f)
if (!floatMap_.initialized()) {
enoughMemory_ &= floatMap_.init();
if (!enoughMemory_)
return NULL;
return nullptr;
}
size_t floatIndex;
FloatMap::AddPtr p = floatMap_.lookupForAdd(f);
@ -81,7 +81,7 @@ MacroAssemblerX86::getFloat(float f)
enoughMemory_ &= floats_.append(Float(f));
enoughMemory_ &= floatMap_.add(p, f, floatIndex);
if (!enoughMemory_)
return NULL;
return nullptr;
}
Float &flt = floats_[floatIndex];
JS_ASSERT(!flt.uses.bound());

View file

@ -243,7 +243,7 @@ IonRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
// Baseline OSR will return here.
masm.bind(returnLabel.src());
if (!masm.addCodeLabel(returnLabel))
return NULL;
return nullptr;
}
// Pop arguments off the stack.
@ -413,7 +413,7 @@ IonRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// Call the target function.
// Note that this assumes the function is JITted.
masm.loadPtr(Address(eax, JSFunction::offsetOfNativeOrScript()), eax);
masm.loadBaselineOrIonRaw(eax, eax, mode, NULL);
masm.loadBaselineOrIonRaw(eax, eax, mode, nullptr);
masm.call(eax);
uint32_t returnOffset = masm.currentOffset();
@ -708,7 +708,7 @@ IonRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
Linker linker(masm);
IonCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
if (!wrapper)
return NULL;
return nullptr;
#ifdef JS_ION_PERF
writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
@ -717,7 +717,7 @@ IonRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
// linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
// use relookupOrAdd instead of add.
if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
return NULL;
return nullptr;
return wrapper;
}
@ -784,14 +784,14 @@ IonRuntime::generateDebugTrapHandler(JSContext *cx)
masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
// Enter a stub frame and call the HandleDebugTrap VM function. Ensure
// the stub frame has a NULL ICStub pointer, since this pointer is marked
// during GC.
masm.movePtr(ImmPtr(NULL), BaselineStubReg);
// the stub frame has a nullptr ICStub pointer, since this pointer is
// marked during GC.
masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
EmitEnterStubFrame(masm, scratch3);
IonCode *code = cx->runtime()->ionRuntime()->getVMWrapper(HandleDebugTrapInfo);
if (!code)
return NULL;
return nullptr;
masm.push(scratch1);
masm.push(scratch2);