diff --git a/js/public/Class.h b/js/public/Class.h
index 646525a140d8..79628c575727 100644
--- a/js/public/Class.h
+++ b/js/public/Class.h
@@ -701,7 +701,7 @@ static const uintptr_t JSCLASS_RESERVED_SLOTS_SHIFT = 8;
 static const uint32_t JSCLASS_RESERVED_SLOTS_WIDTH = 8;
 static const uint32_t JSCLASS_RESERVED_SLOTS_MASK =
-    JS_BITMASK(JSCLASS_RESERVED_SLOTS_WIDTH);
+    js::BitMask(JSCLASS_RESERVED_SLOTS_WIDTH);
 
 static constexpr uint32_t JSCLASS_HAS_RESERVED_SLOTS(uint32_t n) {
   return (n & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT;
 }
@@ -759,7 +759,7 @@ static constexpr uint32_t JSCLASS_GLOBAL_FLAGS =
 // Fast access to the original value of each standard class's prototype.
 static const uint32_t JSCLASS_CACHED_PROTO_SHIFT = JSCLASS_HIGH_FLAGS_SHIFT + 9;
 static const uint32_t JSCLASS_CACHED_PROTO_MASK =
-    JS_BITMASK(js::JSCLASS_CACHED_PROTO_WIDTH);
+    js::BitMask(js::JSCLASS_CACHED_PROTO_WIDTH);
 
 static_assert(JSProto_LIMIT <= (JSCLASS_CACHED_PROTO_MASK + 1),
               "JSProtoKey must not exceed the maximum cacheable proto-mask");
diff --git a/js/public/HeapAPI.h b/js/public/HeapAPI.h
index 4a353ded85b4..b2fffbd75ecc 100644
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -230,14 +230,15 @@ struct Zone {
 };
 
 struct String {
-  static const uint32_t NON_ATOM_BIT = JS_BIT(1);
-  static const uint32_t LINEAR_BIT = JS_BIT(4);
-  static const uint32_t INLINE_CHARS_BIT = JS_BIT(6);
-  static const uint32_t LATIN1_CHARS_BIT = JS_BIT(9);
-  static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t TYPE_FLAGS_MASK = JS_BITMASK(9) - JS_BIT(2) - JS_BIT(0);
-  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t PERMANENT_ATOM_FLAGS = JS_BIT(8);
+  static const uint32_t NON_ATOM_BIT = js::Bit(1);
+  static const uint32_t LINEAR_BIT = js::Bit(4);
+  static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
+  static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
+  static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t TYPE_FLAGS_MASK =
+      js::BitMask(9) - js::Bit(2) - js::Bit(0);
+  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t PERMANENT_ATOM_FLAGS = js::Bit(8);
 
   uintptr_t flags_;
 #if JS_BITS_PER_WORD == 32
diff --git a/js/src/builtin/MapObject.cpp b/js/src/builtin/MapObject.cpp
index 298653b5cc62..751ef2539a0d 100644
--- a/js/src/builtin/MapObject.cpp
+++ b/js/src/builtin/MapObject.cpp
@@ -219,7 +219,7 @@ MapIteratorObject* MapIteratorObject::create(JSContext* cx, HandleObject obj,
     iterobj->setSlot(RangeSlot, PrivateValue(nullptr));
     iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
 
-    const size_t size = JS_ROUNDUP(sizeof(ValueMap::Range), gc::CellAlignBytes);
+    const size_t size = RoundUp(sizeof(ValueMap::Range), gc::CellAlignBytes);
     buffer = nursery.allocateBufferSameLocation(iterobj, size);
     if (buffer) {
       break;
@@ -992,7 +992,7 @@ SetIteratorObject* SetIteratorObject::create(JSContext* cx, HandleObject obj,
     iterobj->setSlot(RangeSlot, PrivateValue(nullptr));
     iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
 
-    const size_t size = JS_ROUNDUP(sizeof(ValueSet::Range), gc::CellAlignBytes);
+    const size_t size = RoundUp(sizeof(ValueSet::Range), gc::CellAlignBytes);
     buffer = nursery.allocateBufferSameLocation(iterobj, size);
     if (buffer) {
       break;
diff --git a/js/src/ds/LifoAlloc.cpp b/js/src/ds/LifoAlloc.cpp
index 90abba4663b1..9380693c5f47 100644
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -162,7 +162,7 @@ static size_t NextSize(size_t start, size_t used) {
   // After 1 MB, grow more gradually, to waste less memory.
   // The sequence (in megabytes) begins:
   // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
-  return JS_ROUNDUP(used / 8, mb);
+  return RoundUp(used / 8, mb);
 }
 
 LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
diff --git a/js/src/frontend/BytecodeEmitter.cpp b/js/src/frontend/BytecodeEmitter.cpp
index df9e0b0d4f8a..205a5c86cf57 100644
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -477,7 +477,7 @@ bool BytecodeEmitter::emitDupAt(unsigned slotFromTop, unsigned count) {
     return emit1(JSOP_DUP2);
   }
 
-  if (slotFromTop >= JS_BIT(24)) {
+  if (slotFromTop >= Bit(24)) {
     reportError(nullptr, JSMSG_TOO_MANY_LOCALS);
     return false;
   }
@@ -2069,11 +2069,11 @@ bool BytecodeEmitter::emitNumberOp(double dval) {
     }
     uint32_t u = uint32_t(ival);
-    if (u < JS_BIT(16)) {
+    if (u < Bit(16)) {
       if (!emitUint16Operand(JSOP_UINT16, u)) {
         return false;
       }
-    } else if (u < JS_BIT(24)) {
+    } else if (u < Bit(24)) {
       BytecodeOffset off;
       if (!emitN(JSOP_UINT24, 3, &off)) {
        return false;
      }
@@ -2272,7 +2272,7 @@ bool BytecodeEmitter::isRunOnceLambda() {
 
 bool BytecodeEmitter::allocateResumeIndex(BytecodeOffset offset,
                                           uint32_t* resumeIndex) {
-  static constexpr uint32_t MaxResumeIndex = JS_BITMASK(24);
+  static constexpr uint32_t MaxResumeIndex = BitMask(24);
 
   static_assert(
       MaxResumeIndex < uint32_t(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
diff --git a/js/src/frontend/SourceNotes.h b/js/src/frontend/SourceNotes.h
index 22a53bd72927..0e2d19f8ee4c 100644
--- a/js/src/frontend/SourceNotes.h
+++ b/js/src/frontend/SourceNotes.h
@@ -214,9 +214,9 @@ inline bool SN_IS_TERMINATOR(jssrcnote* sn) { return *sn == SRC_NULL; }
 #define SN_TYPE_BITS 5
 #define SN_DELTA_BITS 3
 #define SN_XDELTA_BITS 6
-#define SN_TYPE_MASK (JS_BITMASK(SN_TYPE_BITS) << SN_DELTA_BITS)
-#define SN_DELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_DELTA_BITS))
-#define SN_XDELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_XDELTA_BITS))
+#define SN_TYPE_MASK (js::BitMask(SN_TYPE_BITS) << SN_DELTA_BITS)
+#define SN_DELTA_MASK ((ptrdiff_t)js::BitMask(SN_DELTA_BITS))
+#define SN_XDELTA_MASK ((ptrdiff_t)js::BitMask(SN_XDELTA_BITS))
 
 #define SN_MAKE_NOTE(sn, t, d) \
   (*(sn) = (jssrcnote)(((t) << SN_DELTA_BITS) | ((d)&SN_DELTA_MASK)))
@@ -235,8 +235,8 @@ inline bool SN_IS_TERMINATOR(jssrcnote* sn) { return *sn == SRC_NULL; }
   (SN_IS_XDELTA(sn) ? SN_MAKE_XDELTA(sn, delta) \
                     : SN_MAKE_NOTE(sn, SN_TYPE(sn), delta))
 
-#define SN_DELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_DELTA_BITS))
-#define SN_XDELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_XDELTA_BITS))
+#define SN_DELTA_LIMIT ((ptrdiff_t)js::Bit(SN_DELTA_BITS))
+#define SN_XDELTA_LIMIT ((ptrdiff_t)js::Bit(SN_XDELTA_BITS))
 
 /*
  * Offset fields follow certain notes and are frequency-encoded: an offset in
diff --git a/js/src/frontend/SwitchEmitter.cpp b/js/src/frontend/SwitchEmitter.cpp
index c7ee2df4f622..f0327cb7a53c 100644
--- a/js/src/frontend/SwitchEmitter.cpp
+++ b/js/src/frontend/SwitchEmitter.cpp
@@ -32,7 +32,7 @@ bool SwitchEmitter::TableGenerator::addNumber(int32_t caseValue) {
     return true;
   }
 
-  if (unsigned(caseValue + int(JS_BIT(15))) >= unsigned(JS_BIT(16))) {
+  if (unsigned(caseValue + int(Bit(15))) >= unsigned(Bit(16))) {
     setInvalid();
     return true;
   }
@@ -48,7 +48,7 @@ bool SwitchEmitter::TableGenerator::addNumber(int32_t caseValue) {
   // We bias caseValue by 65536 if it's negative, and hope that's a rare case
   // (because it requires a malloc'd bitmap).
   if (caseValue < 0) {
-    caseValue += JS_BIT(16);
+    caseValue += Bit(16);
   }
   if (caseValue >= intmapBitLength_) {
     size_t newLength = NumWordsForBitArrayOfLength(caseValue + 1);
@@ -87,7 +87,7 @@ void SwitchEmitter::TableGenerator::finish(uint32_t caseCount) {
   // Compute table length and select condswitch instead if overlarge
   // or more than half-sparse.
   tableLength_ = uint32_t(high_ - low_ + 1);
-  if (tableLength_ >= JS_BIT(16) || tableLength_ > 2 * caseCount) {
+  if (tableLength_ >= Bit(16) || tableLength_ > 2 * caseCount) {
     setInvalid();
   }
 }
@@ -139,7 +139,7 @@ bool SwitchEmitter::emitLexical(Handle<LexicalScope::Data*> bindings) {
 
 bool SwitchEmitter::validateCaseCount(uint32_t caseCount) {
   MOZ_ASSERT(state_ == State::Discriminant || state_ == State::Lexical);
-  if (caseCount > JS_BIT(16)) {
+  if (caseCount > Bit(16)) {
     bce_->reportError(switchPos_, JSMSG_TOO_MANY_CASES);
     return false;
   }
diff --git a/js/src/fuzz-tests/testStructuredCloneReader.cpp b/js/src/fuzz-tests/testStructuredCloneReader.cpp
index b76b636876ec..0025779b451d 100644
--- a/js/src/fuzz-tests/testStructuredCloneReader.cpp
+++ b/js/src/fuzz-tests/testStructuredCloneReader.cpp
@@ -33,7 +33,7 @@ static int testStructuredCloneReaderFuzz(const uint8_t* buf, size_t size) {
 
   // Make sure to pad the buffer to a multiple of kSegmentAlignment
   const size_t kSegmentAlignment = 8;
-  size_t buf_size = JS_ROUNDUP(size, kSegmentAlignment);
+  size_t buf_size = RoundUp(size, kSegmentAlignment);
 
   JS::StructuredCloneScope scope = JS::StructuredCloneScope::DifferentProcess;
 
diff --git a/js/src/gc/Cell.h b/js/src/gc/Cell.h
index dc3d09d54ace..f1a50e25fda3 100644
--- a/js/src/gc/Cell.h
+++ b/js/src/gc/Cell.h
@@ -69,16 +69,16 @@ struct alignas(gc::CellAlignBytes) Cell {
  public:
   // The low bits of the first word of each Cell are reserved for GC flags.
   static constexpr int ReservedBits = 2;
-  static constexpr uintptr_t RESERVED_MASK = JS_BITMASK(ReservedBits);
+  static constexpr uintptr_t RESERVED_MASK = BitMask(ReservedBits);
 
   // Indicates if the cell is currently a RelocationOverlay
-  static constexpr uintptr_t FORWARD_BIT = JS_BIT(0);
+  static constexpr uintptr_t FORWARD_BIT = Bit(0);
 
   // When a Cell is in the nursery, this will indicate if it is a JSString (1)
   // or JSObject (0). When not in nursery, this bit is still reserved for
   // JSString to use as JSString::NON_ATOM bit. This may be removed by Bug
   // 1376646.
-  static constexpr uintptr_t JSSTRING_BIT = JS_BIT(1);
+  static constexpr uintptr_t JSSTRING_BIT = Bit(1);
 
   MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
   MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp
index 7251f382b858..6e8aa99f6c63 100644
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -286,14 +286,14 @@ const AllocKind gc::slotsToThingKind[] = {
 
 // Check that reserved bits of a Cell are compatible with our typical allocators
 // since most derived classes will store a pointer in the first word.
-static_assert(js::detail::LIFO_ALLOC_ALIGN > JS_BITMASK(Cell::ReservedBits),
+static_assert(js::detail::LIFO_ALLOC_ALIGN > BitMask(Cell::ReservedBits),
               "Cell::ReservedBits should support LifoAlloc");
-static_assert(CellAlignBytes > JS_BITMASK(Cell::ReservedBits),
+static_assert(CellAlignBytes > BitMask(Cell::ReservedBits),
               "Cell::ReservedBits should support gc::Cell");
 static_assert(
-    sizeof(uintptr_t) > JS_BITMASK(Cell::ReservedBits),
+    sizeof(uintptr_t) > BitMask(Cell::ReservedBits),
     "Cell::ReservedBits should support small malloc / aligned globals");
-static_assert(js::jit::CodeAlignment > JS_BITMASK(Cell::ReservedBits),
+static_assert(js::jit::CodeAlignment > BitMask(Cell::ReservedBits),
               "Cell::ReservedBits should support JIT code");
 
 static_assert(mozilla::ArrayLength(slotsToThingKind) ==
diff --git a/js/src/gc/Nursery-inl.h b/js/src/gc/Nursery-inl.h
index 60e5f2687841..7c605d85452b 100644
--- a/js/src/gc/Nursery-inl.h
+++ b/js/src/gc/Nursery-inl.h
@@ -100,7 +100,7 @@ namespace js {
 
 template <typename T>
 static inline T* AllocateObjectBuffer(JSContext* cx, uint32_t count) {
-  size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
   T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
   if (!buffer) {
     ReportOutOfMemory(cx);
@@ -114,7 +114,7 @@ static inline T* AllocateObjectBuffer(JSContext* cx, JSObject* obj,
   if (cx->isHelperThreadContext()) {
     return cx->pod_malloc<T>(count);
   }
-  size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
   T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(obj, nbytes));
   if (!buffer) {
     ReportOutOfMemory(cx);
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
index e58810a82fea..324a089506e8 100644
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -140,8 +140,8 @@ void js::NurseryDecommitTask::queueRange(
 
   // Only save this to decommit later if there's at least one page to
   // decommit.
-  if (JS_ROUNDUP(newCapacity, SystemPageSize()) >=
-      JS_ROUNDDOWN(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
+  if (RoundUp(newCapacity, SystemPageSize()) >=
+      RoundDown(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
     // Clear the existing decommit request because it may be a larger request
     // for the same chunk.
     partialChunk = nullptr;
@@ -384,7 +384,7 @@ void js::Nursery::enterZealMode() {
                    JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
     }
 
-    capacity_ = JS_ROUNDUP(tunables().gcMaxNurseryBytes(), ChunkSize);
+    capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
     setCurrentEnd();
   }
 }
@@ -445,8 +445,7 @@ Cell* js::Nursery::allocateString(Zone* zone, size_t size, AllocKind kind) {
   // RelocationOverlay.
   MOZ_ASSERT(size >= sizeof(RelocationOverlay));
 
-  size_t allocSize =
-      JS_ROUNDUP(sizeof(StringLayout) - 1 + size, CellAlignBytes);
+  size_t allocSize = RoundUp(sizeof(StringLayout) - 1 + size, CellAlignBytes);
   auto header = static_cast<StringLayout*>(allocate(allocSize));
   if (!header) {
     return nullptr;
@@ -1327,7 +1326,7 @@ bool js::Nursery::allocateNextChunk(const unsigned chunkno,
   MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
              (chunkno == 0 && allocatedChunkCount() == 0));
   MOZ_ASSERT(chunkno == allocatedChunkCount());
-  MOZ_ASSERT(chunkno < JS_HOWMANY(capacity(), ChunkSize));
+  MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
 
   if (!chunks_.resize(newCount)) {
     return false;
@@ -1442,10 +1441,10 @@ bool js::Nursery::maybeResizeExact(JS::GCReason reason) {
 
 size_t js::Nursery::roundSize(size_t size) {
   if (size >= ChunkSize) {
-    size = JS_ROUND(size, ChunkSize);
+    size = Round(size, ChunkSize);
   } else {
-    size = Min(JS_ROUND(size, SubChunkStep),
-               JS_ROUNDDOWN(NurseryChunkUsableSize, SubChunkStep));
+    size = Min(Round(size, SubChunkStep),
+               RoundDown(NurseryChunkUsableSize, SubChunkStep));
   }
   MOZ_ASSERT(size >= ArenaSize);
   return size;
@@ -1529,7 +1528,7 @@ void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
   }
 
   MOZ_ASSERT(newCapacity < capacity_);
-  unsigned newCount = JS_HOWMANY(newCapacity, ChunkSize);
+  unsigned newCount = HowMany(newCapacity, ChunkSize);
   if (newCount < allocatedChunkCount()) {
     freeChunksFrom(newCount);
   }
diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
index 74ab44c52f33..1d0986ab4317 100644
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -194,7 +194,7 @@ class Nursery {
   // collection.
   unsigned maxChunkCount() const {
     MOZ_ASSERT(capacity());
-    return JS_HOWMANY(capacity(), gc::ChunkSize);
+    return HowMany(capacity(), gc::ChunkSize);
   }
 
   void enable();
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 8ff8ea1fc425..54ccc82ad20a 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -927,9 +927,9 @@ static void AllocateAndInitTypedArrayBuffer(JSContext* cx,
 
   size_t nbytes = count * obj->bytesPerElement();
   MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-             "JS_ROUNDUP must not overflow");
+             "RoundUp must not overflow");
 
-  nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+  nbytes = RoundUp(nbytes, sizeof(Value));
   void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                  js::ArrayBufferContentsArena);
   if (buf) {
diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.cpp b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
index d57bffb38cd1..19272779d61c 100644
--- a/js/src/jit/shared/AtomicOperations-shared-jit.cpp
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -659,8 +659,7 @@ void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
   void (*copyWord)(uint8_t * dest, const uint8_t* src);
 
   if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
-    const uint8_t* cutoff =
-        (const uint8_t*)JS_ROUNDUP(uintptr_t(src), WORDSIZE);
+    const uint8_t* cutoff = (const uint8_t*)RoundUp(uintptr_t(src), WORDSIZE);
     MOZ_ASSERT(cutoff <= lim);  // because nbytes >= WORDSIZE
     while (src < cutoff) {
       AtomicCopyByteUnsynchronized(dest++, src++);
@@ -861,7 +860,7 @@ bool InitializeJittedAtomics() {
 
   // Allocate executable memory.
   uint32_t codeLength = masm.bytesNeeded();
-  size_t roundedCodeLength = JS_ROUNDUP(codeLength, ExecutableCodePageSize);
+  size_t roundedCodeLength = RoundUp(codeLength, ExecutableCodePageSize);
   uint8_t* code = (uint8_t*)AllocateExecutableMemory(
       roundedCodeLength, ProtectionSetting::Writable,
       MemCheckKind::MakeUndefined);
diff --git a/js/src/jstypes.h b/js/src/jstypes.h
index ddaab188842b..8d24a3aaf02c 100644
--- a/js/src/jstypes.h
+++ b/js/src/jstypes.h
@@ -25,6 +25,9 @@
 #include "mozilla/Casting.h"
 #include "mozilla/Types.h"
 
+#include <stddef.h>
+#include <stdint.h>
+
 // jstypes.h is (or should be!) included by every file in SpiderMonkey.
 // js-config.h also should be included by every file. So include it here.
 // XXX: including it in js/RequiredDefines.h should be a better option, since
@@ -77,24 +80,34 @@
 while (0)
 
 /***********************************************************************
-** MACROS:      JS_BIT
-**              JS_BITMASK
+** FUNCTIONS:   Bit
+**              BitMask
 ** DESCRIPTION:
-** Bit masking macros. XXX n must be <= 31 to be portable
+** Bit masking functions. XXX n must be <= 31 to be portable
 ***********************************************************************/
-#define JS_BIT(n) ((uint32_t)1 << (n))
-#define JS_BITMASK(n) (JS_BIT(n) - 1)
+namespace js {
+constexpr uint32_t Bit(uint32_t n) { return uint32_t(1) << n; }
+
+constexpr uint32_t BitMask(uint32_t n) { return Bit(n) - 1; }
+}  // namespace js
 
 /***********************************************************************
-** MACROS:      JS_HOWMANY
-**              JS_ROUNDUP
+** FUNCTIONS:   HowMany
+**              RoundUp
+**              RoundDown
+**              Round
 ** DESCRIPTION:
-** Commonly used macros for operations on compatible types.
+** Commonly used functions for operations on compatible types.
 ***********************************************************************/
-#define JS_HOWMANY(x, y) (((x) + (y)-1) / (y))
-#define JS_ROUNDUP(x, y) (JS_HOWMANY(x, y) * (y))
-#define JS_ROUNDDOWN(x, y) (((x) / (y)) * (y))
-#define JS_ROUND(x, y) ((((x) + (y) / 2) / (y)) * (y))
+namespace js {
+constexpr size_t HowMany(size_t x, size_t y) { return (x + y - 1) / y; }
+
+constexpr size_t RoundUp(size_t x, size_t y) { return HowMany(x, y) * y; }
+
+constexpr size_t RoundDown(size_t x, size_t y) { return (x / y) * y; }
+
+constexpr size_t Round(size_t x, size_t y) { return ((x + y / 2) / y) * y; }
+}  // namespace js
 
 #if defined(JS_64BIT)
 #  define JS_BITS_PER_WORD 64
diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
index 4b7a7917e533..ddc4ee934621 100644
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -759,7 +759,7 @@ static bool CreateSpecificWasmBuffer(
     uint32_t cur = clampedMaxSize.value() / 2;
 
     for (; cur > initialSize; cur /= 2) {
-      uint32_t clampedMaxSize = JS_ROUNDUP(cur, wasm::PageSize);
+      uint32_t clampedMaxSize = RoundUp(cur, wasm::PageSize);
       buffer = RawbufT::Allocate(initialSize, Some(clampedMaxSize), mappedSize);
       if (buffer) {
         break;
@@ -774,7 +774,7 @@ static bool CreateSpecificWasmBuffer(
 
     // Try to grow our chunk as much as possible.
     for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) {
-      buffer->tryGrowMaxSizeInPlace(JS_ROUNDUP(d, wasm::PageSize));
+      buffer->tryGrowMaxSizeInPlace(RoundUp(d, wasm::PageSize));
     }
   }
 
@@ -974,7 +974,7 @@ inline size_t ArrayBufferObject::associatedBytes() const {
   if (bufferKind() == MALLOCED) {
     return byteLength();
   } else if (bufferKind() == MAPPED) {
-    return JS_ROUNDUP(byteLength(), js::gc::SystemPageSize());
+    return RoundUp(byteLength(), js::gc::SystemPageSize());
   } else {
     MOZ_CRASH("Unexpected buffer kind");
   }
@@ -1168,7 +1168,7 @@ ArrayBufferObject* ArrayBufferObject::createForContents(
   } else if (contents.kind() == EXTERNAL) {
     // Store the FreeInfo in the inline data slots so that we
     // don't use up slots for it in non-refcounted array buffers.
-    size_t freeInfoSlots = JS_HOWMANY(sizeof(FreeInfo), sizeof(Value));
+    size_t freeInfoSlots = HowMany(sizeof(FreeInfo), sizeof(Value));
     MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS,
                "FreeInfo must fit in inline slots");
     nslots += freeInfoSlots;
@@ -1176,7 +1176,7 @@ ArrayBufferObject* ArrayBufferObject::createForContents(
     // The ABO is taking ownership, so account the bytes against the zone.
     nAllocated = nbytes;
     if (contents.kind() == MAPPED) {
-      nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize());
+      nAllocated = RoundUp(nbytes, js::gc::SystemPageSize());
     } else {
       MOZ_ASSERT(contents.kind() == MALLOCED,
                  "should have handled all possible callers' kinds");
@@ -1220,7 +1220,7 @@ ArrayBufferObject* ArrayBufferObject::createZeroed(
   size_t nslots = JSCLASS_RESERVED_SLOTS(&class_);
   uint8_t* data;
   if (nbytes <= MaxInlineBytes) {
-    int newSlots = JS_HOWMANY(nbytes, sizeof(Value));
+    int newSlots = HowMany(nbytes, sizeof(Value));
     MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value)));
 
     nslots += newSlots;
diff --git a/js/src/vm/BigIntType.h b/js/src/vm/BigIntType.h
index 150ed503f11b..330959e64c16 100644
--- a/js/src/vm/BigIntType.h
+++ b/js/src/vm/BigIntType.h
@@ -48,7 +48,7 @@ class BigInt final
 
  private:
   // The low NumFlagBitsReservedForGC flag bits are reserved.
-  static constexpr uintptr_t SignBit = JS_BIT(Base::NumFlagBitsReservedForGC);
+  static constexpr uintptr_t SignBit = js::Bit(Base::NumFlagBitsReservedForGC);
 
   static constexpr size_t InlineDigitsLength =
       (js::gc::MinCellSize - sizeof(Base)) / sizeof(Digit);
diff --git a/js/src/vm/RegExpObject.h b/js/src/vm/RegExpObject.h
index 895f8e52544f..7f0eed408683 100644
--- a/js/src/vm/RegExpObject.h
+++ b/js/src/vm/RegExpObject.h
@@ -45,6 +45,13 @@ namespace frontend {
 class TokenStreamAnyChars;
 }
 
+// Temporary definitions until irregexp is updated from upstream.
+namespace irregexp {
+constexpr size_t JS_HOWMANY(size_t x, size_t y) { return (x + y - 1) / y; }
+
+constexpr size_t JS_ROUNDUP(size_t x, size_t y) { return JS_HOWMANY(x, y) * y; }
+}  // namespace irregexp
+
 extern RegExpObject* RegExpAlloc(JSContext* cx, NewObjectKind newKind,
                                  HandleObject proto = nullptr);
 
diff --git a/js/src/vm/Shape-inl.h b/js/src/vm/Shape-inl.h
index 3728d4639ed2..4ee3672e0ae3 100644
--- a/js/src/vm/Shape-inl.h
+++ b/js/src/vm/Shape-inl.h
@@ -291,7 +291,7 @@ MOZ_ALWAYS_INLINE ShapeTable::Entry& ShapeTable::searchUnchecked(jsid id) {
   /* Collision: double hash. */
   uint32_t sizeLog2 = HASH_BITS - hashShift_;
   HashNumber hash2 = Hash2(hash0, sizeLog2, hashShift_);
-  uint32_t sizeMask = JS_BITMASK(sizeLog2);
+  uint32_t sizeMask = BitMask(sizeLog2);
 
   /* Save the first removed entry pointer so we can recycle it if adding. */
   Entry* firstRemoved;
diff --git a/js/src/vm/Shape.cpp b/js/src/vm/Shape.cpp
index 0a0c8d52cec8..bdabf39676dd 100644
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -46,7 +46,7 @@ bool ShapeIC::init(JSContext* cx) {
 
 bool ShapeTable::init(JSContext* cx, Shape* lastProp) {
   uint32_t sizeLog2 = CeilingLog2Size(entryCount_);
-  uint32_t size = JS_BIT(sizeLog2);
+  uint32_t size = Bit(sizeLog2);
   if (entryCount_ >= size - (size >> 2)) {
     sizeLog2++;
   }
@@ -54,7 +54,7 @@ bool ShapeTable::init(JSContext* cx, Shape* lastProp) {
     sizeLog2 = MIN_SIZE_LOG2;
   }
 
-  size = JS_BIT(sizeLog2);
+  size = Bit(sizeLog2);
   entries_.reset(cx->pod_calloc<Entry>(size));
   if (!entries_) {
     return false;
@@ -225,8 +225,8 @@ bool ShapeTable::change(JSContext* cx, int log2Delta) {
    */
   uint32_t oldLog2 = HASH_BITS - hashShift_;
   uint32_t newLog2 = oldLog2 + log2Delta;
-  uint32_t oldSize = JS_BIT(oldLog2);
-  uint32_t newSize = JS_BIT(newLog2);
+  uint32_t oldSize = Bit(oldLog2);
+  uint32_t newSize = Bit(newLog2);
   Entry* newTable = cx->maybe_pod_calloc<Entry>(newSize);
   if (!newTable) {
     return false;
diff --git a/js/src/vm/Shape.h b/js/src/vm/Shape.h
index 9cbf9927fca8..09634ac73d98 100644
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -207,8 +207,8 @@ typedef JSGetterOp GetterOp;
 typedef JSSetterOp SetterOp;
 
 /* Limit on the number of slotful properties in an object. */
-static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
-static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
+static const uint32_t SHAPE_INVALID_SLOT = Bit(24) - 1;
+static const uint32_t SHAPE_MAXIMUM_SLOT = Bit(24) - 2;
 
 enum class MaybeAdding { Adding = true, NotAdding = false };
 
@@ -337,7 +337,7 @@ class ShapeTable {
   // This value is low because it's common for a ShapeTable to be created
   // with an entryCount of zero.
   static const uint32_t MIN_SIZE_LOG2 = 2;
-  static const uint32_t MIN_SIZE = JS_BIT(MIN_SIZE_LOG2);
+  static const uint32_t MIN_SIZE = Bit(MIN_SIZE_LOG2);
 
   uint32_t hashShift_; /* multiplicative hash shift */
 
@@ -415,7 +415,7 @@ class ShapeTable {
   }
 
   // By definition, hashShift = HASH_BITS - log2(capacity).
-  uint32_t capacity() const { return JS_BIT(HASH_BITS - hashShift_); }
+  uint32_t capacity() const { return Bit(HASH_BITS - hashShift_); }
 
   // Whether we need to grow.  We want to do this if the load factor
   // is >= 0.75
@@ -903,7 +903,7 @@ class Shape : public gc::TenuredCell {
     // For other shapes in the property tree with a parent, stores the
     // parent's slot index (which may be invalid), and invalid for all
     // other shapes.
-    SLOT_MASK = JS_BIT(24) - 1,
+    SLOT_MASK = BitMask(24),
 
     // Number of fixed slots in objects with this shape.
     // FIXED_SLOTS_MAX is the biggest count of fixed slots a Shape can store.
diff --git a/js/src/vm/StringType.h b/js/src/vm/StringType.h
index bfb7080bf101..0bdc5cf3cd4d 100644
--- a/js/src/vm/StringType.h
+++ b/js/src/vm/StringType.h
@@ -262,17 +262,17 @@ class JSString : public js::gc::CellWithLengthAndFlags {
                 "JSString::flags must reserve enough bits for Cell");
 
   static const uint32_t NON_ATOM_BIT = js::gc::Cell::JSSTRING_BIT;
-  static const uint32_t LINEAR_BIT = JS_BIT(4);
-  static const uint32_t DEPENDENT_BIT = JS_BIT(5);
-  static const uint32_t INLINE_CHARS_BIT = JS_BIT(6);
+  static const uint32_t LINEAR_BIT = js::Bit(4);
+  static const uint32_t DEPENDENT_BIT = js::Bit(5);
+  static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
 
   static const uint32_t EXTENSIBLE_FLAGS =
-      NON_ATOM_BIT | LINEAR_BIT | JS_BIT(7);
-  static const uint32_t EXTERNAL_FLAGS = NON_ATOM_BIT | LINEAR_BIT | JS_BIT(8);
+      NON_ATOM_BIT | LINEAR_BIT | js::Bit(7);
+  static const uint32_t EXTERNAL_FLAGS = NON_ATOM_BIT | LINEAR_BIT | js::Bit(8);
 
-  static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | JS_BIT(7);
-  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t PERMANENT_ATOM_FLAGS = JS_BIT(8);
+  static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | js::Bit(7);
+  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t PERMANENT_ATOM_FLAGS = js::Bit(8);
 
   /* Initial flags for thin inline and fat inline strings. */
   static const uint32_t INIT_THIN_INLINE_FLAGS =
@@ -285,14 +285,14 @@ class JSString : public js::gc::CellWithLengthAndFlags {
       NON_ATOM_BIT | LINEAR_BIT | DEPENDENT_BIT;
 
   static const uint32_t TYPE_FLAGS_MASK =
-      JS_BITMASK(9) - JS_BITMASK(3) + js::gc::Cell::JSSTRING_BIT;
+      js::BitMask(9) - js::BitMask(3) + js::gc::Cell::JSSTRING_BIT;
 
-  static const uint32_t LATIN1_CHARS_BIT = JS_BIT(9);
+  static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
 
-  static const uint32_t INDEX_VALUE_BIT = JS_BIT(10);
+  static const uint32_t INDEX_VALUE_BIT = js::Bit(10);
   static const uint32_t INDEX_VALUE_SHIFT = 16;
 
-  static const uint32_t PINNED_ATOM_BIT = JS_BIT(11);
+  static const uint32_t PINNED_ATOM_BIT = js::Bit(11);
 
   static const uint32_t MAX_LENGTH = js::MaxStringLength;
 
diff --git a/js/src/vm/StructuredClone.cpp b/js/src/vm/StructuredClone.cpp
index e2ad936e2ace..cbfdd2904125 100644
--- a/js/src/vm/StructuredClone.cpp
+++ b/js/src/vm/StructuredClone.cpp
@@ -2036,14 +2036,14 @@ JSString* JSStructuredCloneReader::readStringImpl(uint32_t nchars) {
 }
 
 JSString* JSStructuredCloneReader::readString(uint32_t data) {
-  uint32_t nchars = data & JS_BITMASK(31);
+  uint32_t nchars = data & BitMask(31);
   bool latin1 = data & (1 << 31);
   return latin1 ? readStringImpl<Latin1Char>(nchars)
                 : readStringImpl<char16_t>(nchars);
 }
 
 BigInt* JSStructuredCloneReader::readBigInt(uint32_t data) {
-  size_t length = data & JS_BITMASK(31);
+  size_t length = data & BitMask(31);
   bool isNegative = data & (1 << 31);
   if (length == 0) {
     return BigInt::zero(context());
diff --git a/js/src/vm/TypedArrayObject.cpp b/js/src/vm/TypedArrayObject.cpp
index 554f8eb8225d..7b8fbaed1b15 100644
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -124,7 +124,7 @@ bool TypedArrayObject::ensureHasBuffer(JSContext* cx,
 
   // If the object is in the nursery, the buffer will be freed by the next
   // nursery GC. Free the data slot pointer if the object has no inline data.
-  size_t nbytes = JS_ROUNDUP(tarray->byteLength(), sizeof(Value));
+  size_t nbytes = RoundUp(tarray->byteLength(), sizeof(Value));
   Nursery& nursery = cx->nursery();
   if (tarray->isTenured() && !tarray->hasInlineElements() &&
       !nursery.isInside(tarray->elements())) {
@@ -170,7 +170,7 @@ void TypedArrayObject::finalize(JSFreeOp* fop, JSObject* obj) {
 
   // Free the data slot pointer if it does not point into the old JSObject.
   if (!curObj->hasInlineElements()) {
-    size_t nbytes = JS_ROUNDUP(curObj->byteLength(), sizeof(Value));
+    size_t nbytes = RoundUp(curObj->byteLength(), sizeof(Value));
     fop->free_(obj, curObj->elements(), nbytes, MemoryUse::TypedArrayElements);
   }
 }
@@ -207,7 +207,7 @@ size_t TypedArrayObject::objectMoved(JSObject* obj, JSObject* old) {
     Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
     if (!nursery.isInside(buf)) {
       nursery.removeMallocedBuffer(buf);
-      size_t nbytes = JS_ROUNDUP(newObj->byteLength(), sizeof(Value));
+      size_t nbytes = RoundUp(newObj->byteLength(), sizeof(Value));
       AddCellMemory(newObj, nbytes, MemoryUse::TypedArrayElements);
       return 0;
     }
@@ -236,10 +236,10 @@ size_t TypedArrayObject::objectMoved(JSObject* obj, JSObject* old) {
   } else {
     MOZ_ASSERT(!oldObj->hasInlineElements());
     MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-               "JS_ROUNDUP must not overflow");
+               "RoundUp must not overflow");
 
     AutoEnterOOMUnsafeRegion oomUnsafe;
-    nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+    nbytes = RoundUp(nbytes, sizeof(Value));
     void* data = newObj->zone()->pod_arena_malloc<uint8_t>(
         js::ArrayBufferContentsArena, nbytes);
     if (!data) {
@@ -566,9 +566,9 @@ class TypedArrayObjectTemplate : public TypedArrayObject {
     if (!fitsInline) {
       MOZ_ASSERT(len > 0);
       MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-                 "JS_ROUNDUP must not overflow");
+                 "RoundUp must not overflow");
 
-      nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+      nbytes = RoundUp(nbytes, sizeof(Value));
       buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                js::ArrayBufferContentsArena);
       if (!buf) {
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
index e6f7f236990a..6b52724e4d42 100644
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -107,7 +107,7 @@ CodeSegment::~CodeSegment() {
 
 static uint32_t RoundupCodeLength(uint32_t codeLength) {
   // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
-  return JS_ROUNDUP(codeLength, ExecutableCodePageSize);
+  return RoundUp(codeLength, ExecutableCodePageSize);
 }
 
 /* static */
diff --git a/js/xpconnect/src/xpcprivate.h b/js/xpconnect/src/xpcprivate.h
index 52d06e73c7f4..a464b7a06efe 100644
--- a/js/xpconnect/src/xpcprivate.h
+++ b/js/xpconnect/src/xpcprivate.h
@@ -1553,7 +1553,7 @@ class XPCWrappedNative final : public nsIXPConnectWrappedNative {
  private:
   enum {
     // Flags bits for mFlatJSObject:
-    FLAT_JS_OBJECT_VALID = JS_BIT(0)
+    FLAT_JS_OBJECT_VALID = js::Bit(0)
  };
 
  bool Init(JSContext* cx, nsIXPCScriptable* scriptable);
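--
Illustrative appendix, not part of the patch: the conversion above is
mechanical, but the one piece of genuinely new code is the jstypes.h hunk, so
it is worth spelling out the arithmetic the constexpr replacements must
reproduce. The sketch below is a standalone translation unit (the file name
jstypes_example.cpp and the main() stub are invented for illustration); the
js:: definitions are copied verbatim from the jstypes.h hunk.

// jstypes_example.cpp - compile-time sanity checks for the new helpers.
#include <stddef.h>
#include <stdint.h>

namespace js {
constexpr uint32_t Bit(uint32_t n) { return uint32_t(1) << n; }
constexpr uint32_t BitMask(uint32_t n) { return Bit(n) - 1; }
constexpr size_t HowMany(size_t x, size_t y) { return (x + y - 1) / y; }
constexpr size_t RoundUp(size_t x, size_t y) { return HowMany(x, y) * y; }
constexpr size_t RoundDown(size_t x, size_t y) { return (x / y) * y; }
constexpr size_t Round(size_t x, size_t y) { return ((x + y / 2) / y) * y; }
}  // namespace js

// Unlike the old macros, these are typed constant expressions, so
// initializers such as JSCLASS_RESERVED_SLOTS_MASK or Cell::RESERVED_MASK
// still fold at compile time; the asserts state the intended semantics.
static_assert(js::Bit(4) == 16, "Bit(n) sets exactly bit n");
static_assert(js::BitMask(9) == 0x1ff, "BitMask(n) sets the low n bits");
static_assert(js::HowMany(17, 8) == 3, "HowMany is ceiling division");
static_assert(js::RoundUp(17, 8) == 24, "RoundUp to the next multiple");
static_assert(js::RoundDown(17, 8) == 16, "RoundDown to the previous multiple");
static_assert(js::Round(17, 8) == 16, "Round picks the nearest multiple");
static_assert(js::Round(20, 8) == 24, "exact ties round upward");

int main() { return 0; }

Two caveats carry over unchanged from the macros: Bit(n) is still only
defined for n <= 31 (the XXX comment is deliberately kept), and the rounding
helpers now fix their operands to size_t, so narrower integer arguments are
widened at the call site instead of being left to macro-expansion typing.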